| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 81-54k | int64 0-721 | stringlengths 91-41.9k | int64 0-699 | int64 0-1 |
def compute_ap(l):  # noqa: E741
    """Print every articulation point (cut vertex) of the undirected graph `l`."""
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at
        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])
                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            # the root of a DFS tree is an articulation point only if it has more than one out-edge
            is_art[i] = out_edge_count > 1
    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
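# Sanity check: in the sample graph above the cut vertices are 2 (it joins the
# 0-1-2 triangle to everything else), 3 (its removal strands leaf 4) and
# 5 (it joins vertex 2 to the 6-7-8 cycle), so the call should print 2, 3 and 5.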
[code_codestyle: 704]
'''simple docstring'''
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
'_TestCommandArgs',
[
'dataset',
'name',
'cache_dir',
'data_dir',
'all_configs',
'save_infos',
'ignore_verifications',
'force_redownload',
'clear_cache',
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_1percent_close(source, target):
    """Check that `source` is within one percent of `target`."""
    return (abs(source - target) / target) < 0.01
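# Worked example (illustrative): abs(99.5 - 100) / 100 == 0.005 < 0.01, so
# is_1percent_close(99.5, 100) is True, while is_1percent_close(98, 100) is False.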
@pytest.mark.integration
def test_test_command(dataset_loading_script_dir):
    args = _TestCommandArgs(dataset=dataset_loading_script_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    dataset_readme_path = os.path.join(dataset_loading_script_dir, "README.md")
    assert os.path.exists(dataset_readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
    expected_dataset_infos = DatasetInfosDict(
        {
            "default": DatasetInfo(
                features=Features(
                    {
                        "tokens": Sequence(Value("string")),
                        "ner_tags": Sequence(
                            ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"])
                        ),
                        "langs": Sequence(Value("string")),
                        "spans": Sequence(Value("string")),
                    }
                ),
                splits=[
                    {
                        "name": "train",
                        "num_bytes": 2351563,
                        "num_examples": 10000,
                    },
                    {
                        "name": "validation",
                        "num_bytes": 238418,
                        "num_examples": 1000,
                    },
                ],
                download_size=3940680,
                dataset_size=2589981,
            )
        }
    )
    assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
        if key == "num_bytes":
            assert is_1percent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_1percent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            assert result == expected
[style_context_codestyle: 640 | label: 0]
'''simple docstring'''
from PIL import Image
def mean_threshold(image: Image) -> Image:
    """Binarize a greyscale PIL image around its mean pixel value."""
    height, width = image.size
    mean = 0
    pixels = image.load()
    for i in range(width):
        for j in range(height):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height
    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image
if __name__ == "__main__":
    image = mean_threshold(Image.open('path_to_image').convert('L'))
image.save('output_image_path')
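# Note: .convert('L') yields a single-channel greyscale image, so each pixel is
# one integer in [0, 255]; the threshold applied is the global mean intensity.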
[code_codestyle: 705]
'''simple docstring'''
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ["text", "image", "audio"]


def create_inputs(input_types: List[str]):
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"""Invalid type requested: {input_type}""")
    return inputs


def output_types(outputs: List):
    output_types = []
    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"""Invalid output: {output}""")
    return output_types


@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))
        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)
        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)
        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]
        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)
        if not isinstance(outputs, list):
            outputs = [outputs]
        self.assertEqual(len(outputs), len(self.tool.outputs))
        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_types_inputs(self):
        inputs = create_inputs(self.tool.inputs)
        _inputs = []
        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))
        # Should not raise an error
        outputs = self.tool(*_inputs)
        if not isinstance(outputs, list):
            outputs = [outputs]
        self.assertEqual(len(outputs), len(self.tool.outputs))
[style_context_codestyle: 640 | label: 0]
'''simple docstring'''
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/owlvit-base-patch32': 'https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json',
'google/owlvit-base-patch16': 'https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json',
'google/owlvit-large-patch14': 'https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json',
}
class OwlViTTextConfig(PretrainedConfig):
    model_type = "owlvit_text_model"

    def __init__(self, vocab_size=49408, hidden_size=512, intermediate_size=2048, num_hidden_layers=12, num_attention_heads=8, max_position_embeddings=16, hidden_act="quick_gelu", layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, pad_token_id=0, bos_token_id=49406, eos_token_id=49407, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
        return cls.from_dict(config_dict, **kwargs)


class OwlViTVisionConfig(PretrainedConfig):
    model_type = "owlvit_vision_model"

    def __init__(self, hidden_size=768, intermediate_size=3072, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=768, patch_size=32, hidden_act="quick_gelu", layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
        return cls.from_dict(config_dict, **kwargs)


class OwlViTConfig(PretrainedConfig):
    model_type = "owlvit"
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, projection_dim=512, logit_scale_init_value=2.6592, return_dict=True, **kwargs):
        super().__init__(**kwargs)
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the OwlViTTextConfig with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values.")
        self.text_config = OwlViTTextConfig(**text_config)
        self.vision_config = OwlViTVisionConfig(**vision_config)
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.return_dict = return_dict
        self.initializer_factor = 1.0

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
        return cls.from_dict(config_dict, **kwargs)

    @classmethod
    def from_text_vision_configs(cls, text_config: Dict, vision_config: Dict, **kwargs):
        config_dict = {}
        config_dict["text_config"] = text_config
        config_dict["vision_config"] = vision_config
        return cls.from_dict(config_dict, **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class OwlViTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("attention_mask", {0: "batch", 1: "sequence"}),
            ])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("logits_per_image", {0: "batch"}),
                ("logits_per_text", {0: "batch"}),
                ("text_embeds", {0: "batch"}),
                ("image_embeds", {0: "batch"}),
            ])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(self, processor: "ProcessorMixin", batch_size: int = -1, seq_length: int = -1, framework: Optional["TensorType"] = None) -> Mapping[str, Any]:
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework)
        image_input_dict = super().generate_dummy_inputs(
            processor.image_processor, batch_size=batch_size, framework=framework)
        return {**text_input_dict, **image_input_dict}

    @property
    def default_onnx_opset(self) -> int:
        return 14
[code_codestyle: 706]
'''simple docstring'''
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline
        >>> import torch

        >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")
        >>> pipe_prior.to("cuda")

        >>> prompt = "red cat, 4k photo"
        >>> out = pipe_prior(prompt)
        >>> image_emb = out.image_embeds
        >>> negative_image_emb = out.negative_image_embeds

        >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")
        >>> pipe.to("cuda")

        >>> image = pipe(
        ...     prompt,
        ...     image_embeds=image_emb,
        ...     negative_image_embeds=negative_image_emb,
        ...     height=768,
        ...     width=768,
        ...     num_inference_steps=100,
        ... ).images

        >>> image[0].save("cat.png")
        ```
"""
def get_new_h_w(h, w, scale_factor=8):
    """Round each dimension up to a multiple of scale_factor**2, then divide by scale_factor."""
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
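# Worked example (illustrative): with the default scale_factor=8, h = w = 768
# gives 768 // 64 = 12 with no remainder, so the UNet runs on a 12 * 8 = 96
# latent grid; 770 would round up to (12 + 1) * 8 = 104 instead.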
class KandinskyPipeline(DiffusionPipeline):
    def __init__(self, text_encoder: MultilingualCLIP, tokenizer: XLMRobertaTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, DDPMScheduler], movq: VQModel):
        super().__init__()
        self.register_modules(
            text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents

    def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None):
        batch_size = len(prompt) if isinstance(prompt, list) else 1
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt, padding="max_length", truncation=True, max_length=77, return_attention_mask=True, add_special_tokens=True, return_tensors="pt")
        text_input_ids = text_inputs.input_ids
        untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
            removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f""" {self.tokenizer.model_max_length} tokens: {removed_text}""")
        text_input_ids = text_input_ids.to(device)
        text_mask = text_inputs.attention_mask.to(device)
        prompt_embeds, text_encoder_hidden_states = self.text_encoder(
            input_ids=text_input_ids, attention_mask=text_mask)
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
        text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0)
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"""`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="""
                    f""" {type(prompt)}.""")
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"""`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"""
                    f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
                    " the batch size of `prompt`.")
            else:
                uncond_tokens = negative_prompt
            uncond_input = self.tokenizer(
                uncond_tokens, padding="max_length", max_length=77, truncation=True, return_attention_mask=True, add_special_tokens=True, return_tensors="pt")
            uncond_text_input_ids = uncond_input.input_ids.to(device)
            uncond_text_mask = uncond_input.attention_mask.to(device)
            negative_prompt_embeds, uncond_text_encoder_hidden_states = self.text_encoder(
                input_ids=uncond_text_input_ids, attention_mask=uncond_text_mask)
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len)
            seq_len = uncond_text_encoder_hidden_states.shape[1]
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1)
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view(
                batch_size * num_images_per_prompt, seq_len, -1)
            uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0)
            # done duplicates
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
            text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states])
            text_mask = torch.cat([uncond_text_mask, text_mask])
        return prompt_embeds, text_encoder_hidden_states, text_mask

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device(f"""cuda:{gpu_id}""")
        models = [
            self.unet,
            self.text_encoder,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
        device = torch.device(f"""cuda:{gpu_id}""")
        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        if self.safety_checker is not None:
            _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(self, prompt: Union[str, List[str]], image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], negative_prompt: Optional[Union[str, List[str]]] = None, height: int = 512, width: int = 512, num_inference_steps: int = 100, guidance_scale: float = 4.0, num_images_per_prompt: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(prompt)}""")
        device = self._execution_device
        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0
        prompt_embeds, text_encoder_hidden_states, _ = self._encode_prompt(
            prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt)
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=prompt_embeds.dtype, device=device)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps
        num_channels_latents = self.unet.config.in_channels
        height, width = get_new_h_w(height, width, self.movq_scale_factor)
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width), text_encoder_hidden_states.dtype, device, generator, latents, self.scheduler)
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=text_encoder_hidden_states, added_cond_kwargs=added_cond_kwargs, return_dict=False)[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred, t, latents, generator=generator).prev_sample
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""")
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
[style_context_codestyle: 640 | label: 0]
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

_model_names = [
"small",
"small-base",
"medium",
"medium-base",
"intermediate",
"intermediate-base",
"large",
"large-base",
"xlarge",
"xlarge-base",
]
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json",
"funnel-transformer/small-base": (
"https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"
),
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json",
"funnel-transformer/large-base": (
"https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"
),
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {f"""funnel-transformer/{name}""": 512 for name in _model_names}
PRETRAINED_INIT_CONFIGURATION = {f"""funnel-transformer/{name}""": {"do_lower_case": True} for name in _model_names}
class FunnelTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    cls_token_type_id: int = 2

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", bos_token="<s>", eos_token="</s>", clean_text=True, tokenize_chinese_chars=True, strip_accents=None, wordpieces_prefix="##", **kwargs) -> None:
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, bos_token=bos_token, eos_token=eos_token, clean_text=clean_text, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, wordpieces_prefix=wordpieces_prefix, **kwargs)
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0]
        return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
[code_codestyle: 707]
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
        >>> from diffusers.utils import load_image
        >>> import torch

        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
        ...     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        ... )
        >>> pipe_prior.to("cuda")

        >>> prompt = "A red cartoon frog, 4k"
        >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)

        >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
        ...     "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        ... )
        >>> pipe.to("cuda")

        >>> init_image = load_image(
        ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
        ...     "/kandinsky/frog.png"
        ... )

        >>> image = pipe(
        ...     image=init_image,
        ...     image_embeds=image_emb,
        ...     negative_image_embeds=zero_image_emb,
        ...     height=768,
        ...     width=768,
        ...     num_inference_steps=100,
        ...     strength=0.2,
        ... ).images

        >>> image[0].save("red_frog.png")
        ```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
def prepare_image(pil_image, w=512, h=512):
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
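# The helper above resizes a PIL image to (w, h), rescales RGB values from
# [0, 255] into [-1, 1], and returns a (1, 3, h, w) channels-first float tensor.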
class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()
        self.register_modules(
            unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}""")
        image = image.to(device=device, dtype=dtype)
        batch_size = batch_size * num_images_per_prompt
        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"""You have passed a list of generators of length {len(generator)}, but requested an effective batch"""
                    f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""")
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)
            init_latents = self.movq.config.scaling_factor * init_latents
        init_latents = torch.cat([init_latents], dim=0)
        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device(f"""cuda:{gpu_id}""")
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
        device = torch.device(f"""cuda:{gpu_id}""")
        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(self, image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]], negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], height: int = 512, width: int = 512, num_inference_steps: int = 100, guidance_scale: float = 4.0, strength: float = 0.3, num_images_per_prompt: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, output_type: Optional[str] = "pil", return_dict: bool = True):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f"""Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor""")
        image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)
        latents = self.movq.encode(image)["latents"]
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator)
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False)[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred, t, latents, generator=generator)[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""")
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
[style_context_codestyle: 640 | label: 0]
'''simple docstring'''
def mean_absolute_deviation(nums: list) -> float:
    """
    Return the mean absolute deviation of the numbers in `nums`.

    >>> mean_absolute_deviation([1, 2, 3, 4])
    1.0
    """
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")
    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
[code_codestyle: 708]
'''simple docstring'''
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Pix2StructProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)

    def __call__(self, images=None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, max_patches: Optional[int] = 2048, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")
        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            return text_encoding
        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, **kwargs)
        else:
            # add pixel_values and bbox
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs)
        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop("attention_mask")
            if "input_ids" in text_encoding:
                text_encoding["decoder_input_ids"] = text_encoding.pop("input_ids")
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)
        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
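# Hypothetical usage sketch (the checkpoint name below is an assumption, not
# something defined in this file):
#     from transformers import Pix2StructProcessor
#     processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base")
#     inputs = processor(images=image, text="A photo of", return_tensors="pt")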
[style_context_codestyle: 640 | label: 0]
'''simple docstring'''
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        # histogram of grey levels; x holds the per-bin pixel counts
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            self.rem = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(self.rem)
            self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
            self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()
if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), 'image_data/input.jpg')
    stretcher = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
[code_codestyle: 709]
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22PriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000)
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }
        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size, image_size=224, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=14)
        model = CLIPVisionModelWithProjection(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], resample=3, size=224)
        return image_processor

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor
        scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000, clip_sample=True, clip_sample_range=10.0)
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_prior(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False)[0]
        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]
        assert image.shape == (1, 32)
        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False
        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference, relax_max_difference=relax_max_difference, test_mean_pixel_difference=test_mean_pixel_difference)

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False
        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference, test_mean_pixel_difference=test_mean_pixel_difference)
[style_context_codestyle: 640 | label: 0]
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    """Tokenize a single line, padding/truncating it to max_length."""
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line], max_length=max_length, padding="max_length" if pad_to_max_length else None, truncation=True, return_tensors=return_tensors, add_special_tokens=True, **extra_kw)
def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
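# Worked example (illustrative): with pad_token_id=0 and
# input_ids = [[5, 6, 0], [7, 0, 0]], only the last column is all padding, so
# trim_batch returns [[5, 6], [7, 0]] (and slices attention_mask to match).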
class Seq2SeqDataset(Dataset):
    def __init__(self, tokenizer, data_dir, max_source_length, max_target_length, type_path="train", n_obs=None, src_lang=None, tgt_lang=None, prefix=""):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"""found empty line in {self.src_file}"""
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"""empty source line for index {index}"""
        assert tgt_line, f"""empty tgt line for index {index}"""
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")
        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch):
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch


logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    return list(map(f, x))


def pickle_save(obj, path):
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude )
def lower(_UpperCamelCase ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(_UpperCamelCase ) ) ) )
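# SQuAD-style token-level F1: precision/recall over the multiset of tokens
# shared between the normalized prediction and the normalized ground truth.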
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Any = normalize_answer(_UpperCamelCase ).split()
lowercase_ : int = normalize_answer(_UpperCamelCase ).split()
lowercase_ : str = Counter(_UpperCamelCase ) & Counter(_UpperCamelCase )
lowercase_ : Union[str, Any] = sum(common.values() )
if num_same == 0:
return 0
lowercase_ : Optional[int] = 1.0 * num_same / len(_UpperCamelCase )
lowercase_ : Dict = 1.0 * num_same / len(_UpperCamelCase )
lowercase_ : Optional[int] = (2 * precision * recall) / (precision + recall)
return fa
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
return normalize_answer(_UpperCamelCase ) == normalize_answer(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
assert len(_UpperCamelCase ) == len(_UpperCamelCase )
lowercase_ : Dict = 0
for hypo, pred in zip(_UpperCamelCase , _UpperCamelCase ):
em += exact_match_score(_UpperCamelCase , _UpperCamelCase )
if len(_UpperCamelCase ) > 0:
em /= len(_UpperCamelCase )
return {"em": em}
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return model_prefix.startswith("rag" )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Optional[int] = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
lowercase_ : Any = "dropout_rate"
for p in extra_params:
if getattr(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
if not hasattr(_UpperCamelCase , _UpperCamelCase ) and not hasattr(_UpperCamelCase , equivalent_param[p] ):
logger.info("config doesn\'t have a `{}` attribute".format(_UpperCamelCase ) )
delattr(_UpperCamelCase , _UpperCamelCase )
continue
lowercase_ : int = p if hasattr(_UpperCamelCase , _UpperCamelCase ) else equivalent_param[p]
setattr(_UpperCamelCase , _UpperCamelCase , getattr(_UpperCamelCase , _UpperCamelCase ) )
delattr(_UpperCamelCase , _UpperCamelCase )
return hparams, config
| 710
|
'''simple docstring'''
from __future__ import annotations
UpperCamelCase__ = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
UpperCamelCase__ = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
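# Three implementations of the "next greater element" problem: the first two
# are O(n^2) pairwise scans (the second just iterates more idiomatically);
# the third uses a monotonic stack and runs in O(n).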
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : str = []
lowercase_ : List[str] = len(_UpperCamelCase )
for i in range(_UpperCamelCase ):
lowercase_ : float = -1
for j in range(i + 1 , _UpperCamelCase ):
if arr[i] < arr[j]:
lowercase_ : Union[str, Any] = arr[j]
break
result.append(_UpperCamelCase )
return result
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : List[str] = []
for i, outer in enumerate(_UpperCamelCase ):
lowercase_ : float = -1
for inner in arr[i + 1 :]:
if outer < inner:
lowercase_ : Optional[Any] = inner
break
result.append(_UpperCamelCase )
return result
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : List[str] = len(_UpperCamelCase )
lowercase_ : list[float] = []
lowercase_ : list[float] = [-1] * arr_size
for index in reversed(range(_UpperCamelCase ) ):
if stack:
while stack[-1] <= arr[index]:
stack.pop()
if not stack:
break
if stack:
lowercase_ : Optional[Any] = stack[-1]
stack.append(arr[index] )
return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
UpperCamelCase__ = (
'from __main__ import arr, next_greatest_element_slow, '
'next_greatest_element_fast, next_greatest_element'
)
print(
'next_greatest_element_slow():',
timeit('next_greatest_element_slow(arr)', setup=setup),
)
print(
'next_greatest_element_fast():',
timeit('next_greatest_element_fast(arr)', setup=setup),
)
print(
' next_greatest_element():',
timeit('next_greatest_element(arr)', setup=setup),
)
| 640
| 0
|
'''simple docstring'''
from collections.abc import Callable
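# Bisection root finding: assumes the function is continuous and changes sign
# on [a, b]; the interval is halved until its width falls below 1e-7.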
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : List[Any] = a
lowercase_ : Optional[Any] = b
if function(_UpperCamelCase ) == 0: # one of the a or b is a root for the function
return a
elif function(_UpperCamelCase ) == 0:
return b
elif (
function(_UpperCamelCase ) * function(_UpperCamelCase ) > 0
): # if none of these are root and they are both positive or negative,
# then this algorithm can't find the root
raise ValueError("could not find root in given interval." )
else:
lowercase_ : Optional[Any] = start + (end - start) / 2.0
while abs(start - mid ) > 10**-7: # until the bracketing interval is narrower than 10^-7
if function(_UpperCamelCase ) == 0:
return mid
elif function(_UpperCamelCase ) * function(_UpperCamelCase ) < 0:
lowercase_ : Union[str, Any] = mid
else:
lowercase_ : int = mid
lowercase_ : List[str] = start + (end - start) / 2.0
return mid
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return x**3 - 2 * x - 5
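# f(x) = x**3 - 2*x - 5 has a single real root near x = 2.0946 inside [1, 1000].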
if __name__ == "__main__":
print(bisection(f, 1, 1000))
import doctest
doctest.testmod()
| 711
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json',
}
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: List[Any] = 'gpt_neox_japanese'
def __init__( self : List[str] , a : List[Any]=3_2_0_0_0 , a : Union[str, Any]=2_5_6_0 , a : Optional[Any]=3_2 , a : Any=3_2 , a : str=4 , a : Optional[int]="gelu" , a : Optional[Any]=1.00 , a : Dict=1_0_0_0_0 , a : List[Any]=2_0_4_8 , a : Dict=0.02 , a : int=1e-5 , a : Optional[int]=True , a : Union[str, Any]=3_1_9_9_6 , a : List[Any]=3_1_9_9_9 , a : List[str]=0.1 , a : Dict=0.0 , **a : Union[str, Any] , ):
'''simple docstring'''
super().__init__(bos_token_id=a , eos_token_id=a , **a )
lowercase_ : int = vocab_size
lowercase_ : int = max_position_embeddings
lowercase_ : List[str] = hidden_size
lowercase_ : Any = num_hidden_layers
lowercase_ : Any = num_attention_heads
lowercase_ : List[Any] = intermediate_multiple_size
lowercase_ : List[str] = hidden_act
lowercase_ : Optional[int] = rotary_pct
lowercase_ : Tuple = rotary_emb_base
lowercase_ : Optional[Any] = initializer_range
lowercase_ : List[str] = layer_norm_eps
lowercase_ : List[str] = use_cache
lowercase_ : Any = attention_dropout
lowercase_ : List[Any] = hidden_dropout
| 640
| 0
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
UpperCamelCase__ = logging.get_logger(__name__)
class _UpperCAmelCase ( __UpperCAmelCase ):
__lowerCamelCase: Dict = ['pixel_values']
def __init__( self : int , a : bool = True , a : Dict[str, int] = None , a : float = None , a : PILImageResampling = PILImageResampling.BILINEAR , a : bool = True , a : Union[int, float] = 1 / 2_5_5 , a : bool = True , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , **a : List[Any] , ):
'''simple docstring'''
super().__init__(**lowerCAmelCase_ )
lowercase_ : int = size if size is not None else {"shortest_edge": 3_8_4}
lowercase_ : Union[str, Any] = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
lowercase_ : List[Any] = do_resize
lowercase_ : int = size
# Default value set here for backwards compatibility where the value in config is None
lowercase_ : Optional[int] = crop_pct if crop_pct is not None else 2_2_4 / 2_5_6
lowercase_ : List[Any] = resample
lowercase_ : int = do_rescale
lowercase_ : int = rescale_factor
lowercase_ : Optional[Any] = do_normalize
lowercase_ : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowercase_ : List[str] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCAmelCase__ ( self : Union[str, Any] , a : np.ndarray , a : Dict[str, int] , a : float , a : PILImageResampling = PILImageResampling.BICUBIC , a : Optional[Union[str, ChannelDimension]] = None , **a : Optional[int] , ):
'''simple docstring'''
lowercase_ : Optional[int] = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
if "shortest_edge" not in size:
raise ValueError(f"""Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}""" )
lowercase_ : List[str] = size["shortest_edge"]
if shortest_edge < 3_8_4:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
lowercase_ : Optional[Any] = int(shortest_edge / crop_pct )
lowercase_ : Tuple = get_resize_output_image_size(lowerCAmelCase_ , size=lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
lowercase_ : Any = resize(image=lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=lowerCAmelCase_ , size=(shortest_edge, shortest_edge) , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
lowerCAmelCase_ , size=(shortest_edge, shortest_edge) , resample=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def lowerCAmelCase__ ( self : List[Any] , a : np.ndarray , a : Union[int, float] , a : Optional[Union[str, ChannelDimension]] = None , **a : Dict , ):
'''simple docstring'''
return rescale(lowerCAmelCase_ , scale=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def lowerCAmelCase__ ( self : List[str] , a : np.ndarray , a : Union[float, List[float]] , a : Union[float, List[float]] , a : Optional[Union[str, ChannelDimension]] = None , **a : List[str] , ):
'''simple docstring'''
return normalize(lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def lowerCAmelCase__ ( self : Dict , a : ImageInput , a : bool = None , a : Dict[str, int] = None , a : float = None , a : PILImageResampling = None , a : bool = None , a : float = None , a : bool = None , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[str, TensorType]] = None , a : ChannelDimension = ChannelDimension.FIRST , **a : Optional[int] , ):
'''simple docstring'''
lowercase_ : Any = do_resize if do_resize is not None else self.do_resize
lowercase_ : Tuple = crop_pct if crop_pct is not None else self.crop_pct
lowercase_ : Any = resample if resample is not None else self.resample
lowercase_ : Any = do_rescale if do_rescale is not None else self.do_rescale
lowercase_ : Optional[int] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase_ : int = do_normalize if do_normalize is not None else self.do_normalize
lowercase_ : Optional[int] = image_mean if image_mean is not None else self.image_mean
lowercase_ : Union[str, Any] = image_std if image_std is not None else self.image_std
lowercase_ : str = size if size is not None else self.size
lowercase_ : Any = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
lowercase_ : Any = make_list_of_images(lowerCAmelCase_ )
if not valid_images(lowerCAmelCase_ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_resize and size["shortest_edge"] < 3_8_4 and crop_pct is None:
raise ValueError("crop_pct must be specified if size < 384." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
lowercase_ : str = [to_numpy_array(lowerCAmelCase_ ) for image in images]
if do_resize:
lowercase_ : Union[str, Any] = [self.resize(image=lowerCAmelCase_ , size=lowerCAmelCase_ , crop_pct=lowerCAmelCase_ , resample=lowerCAmelCase_ ) for image in images]
if do_rescale:
lowercase_ : List[Any] = [self.rescale(image=lowerCAmelCase_ , scale=lowerCAmelCase_ ) for image in images]
if do_normalize:
lowercase_ : Optional[int] = [self.normalize(image=lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ ) for image in images]
lowercase_ : Any = [to_channel_dimension_format(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
lowercase_ : Any = {"pixel_values": images}
return BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_ )
| 712
|
'''simple docstring'''
import heapq as hq
import math
from collections.abc import Iterator
class _UpperCAmelCase :
def __init__( self : Optional[Any] , a : Any ):
'''simple docstring'''
lowercase_ : List[Any] = str(id_ )
lowercase_ : List[str] = None
lowercase_ : Tuple = None
lowercase_ : Dict = []
lowercase_ : Union[str, Any] = {} # {vertex:distance}
def __lt__( self : Optional[Any] , a : int ):
'''simple docstring'''
return self.key < other.key
def __repr__( self : Union[str, Any] ):
'''simple docstring'''
return self.id
def lowerCAmelCase__ ( self : Union[str, Any] , a : Optional[int] ):
'''simple docstring'''
self.neighbors.append(a )
def lowerCAmelCase__ ( self : Dict , a : int , a : Optional[int] ):
'''simple docstring'''
lowercase_ : int = weight
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , _UpperCamelCase )
graph[b - 1].add_edge(graph[a - 1] , _UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Any = []
for u in graph:
lowercase_ : List[Any] = math.inf
lowercase_ : str = None
lowercase_ : Tuple = 0
lowercase_ : Tuple = graph[:]
while q:
lowercase_ : List[Any] = min(_UpperCamelCase )
q.remove(_UpperCamelCase )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
lowercase_ : Optional[int] = u
lowercase_ : Union[str, Any] = u.edges[v.id]
for i in range(1 , len(_UpperCamelCase ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
for u in graph:
lowercase_ : str = math.inf
lowercase_ : int = None
lowercase_ : List[Any] = 0
lowercase_ : str = list(_UpperCamelCase )
hq.heapify(_UpperCamelCase )
while h:
lowercase_ : List[Any] = hq.heappop(_UpperCamelCase )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
lowercase_ : str = u
lowercase_ : Optional[int] = u.edges[v.id]
hq.heapify(_UpperCamelCase )
for i in range(1 , len(_UpperCamelCase ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def __SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 640
| 0
|
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _UpperCAmelCase ( _UpperCamelCase ):
__lowerCamelCase: Dict = ['image_processor', 'tokenizer']
__lowerCamelCase: int = 'FlavaImageProcessor'
__lowerCamelCase: Optional[Any] = ('BertTokenizer', 'BertTokenizerFast')
def __init__( self : int , a : Optional[Any]=None , a : Optional[Any]=None , **a : Dict ):
'''simple docstring'''
lowercase_ : str = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , _UpperCAmelCase , )
lowercase_ : Optional[Any] = kwargs.pop("feature_extractor" )
lowercase_ : Tuple = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(_UpperCAmelCase , _UpperCAmelCase )
lowercase_ : Optional[Any] = self.image_processor
def __call__( self : Optional[Any] , a : Tuple = None , a : int = None , a : List[str] = True , a : Optional[int] = False , a : List[Any] = False , a : str = None , a : Union[str, Any] = 0 , a : Tuple = None , a : Tuple = None , a : Union[str, Any] = None , a : List[Any] = None , a : str = None , a : Dict = False , a : Optional[int] = False , a : List[str] = False , a : Optional[int] = False , a : Optional[int] = True , a : str = None , **a : Tuple , ):
'''simple docstring'''
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be None." )
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
lowercase_ : List[str] = self.tokenizer(
text=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , stride=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_overflowing_tokens=_UpperCAmelCase , return_special_tokens_mask=_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , return_length=_UpperCAmelCase , verbose=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase , )
if images is not None:
lowercase_ : Any = self.image_processor(
_UpperCAmelCase , return_image_mask=_UpperCAmelCase , return_codebook_pixels=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase , )
if text is not None and images is not None:
encoding.update(_UpperCAmelCase )
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_UpperCAmelCase ) , tensor_type=_UpperCAmelCase )
def lowerCAmelCase__ ( self : Optional[int] , *a : Dict , **a : Dict ):
'''simple docstring'''
return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase )
def lowerCAmelCase__ ( self : List[str] , *a : List[Any] , **a : Optional[int] ):
'''simple docstring'''
return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase )
@property
def lowerCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
lowercase_ : List[str] = self.tokenizer.model_input_names
lowercase_ : int = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def lowerCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , _UpperCAmelCase , )
return self.image_processor_class
@property
def lowerCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , _UpperCAmelCase , )
return self.image_processor
| 713
|
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Any = False
while is_sorted is False: # keep looping until a full pass makes no swaps
lowercase_ : List[str] = True
for i in range(0 , len(_UpperCamelCase ) - 1 , 2 ): # iterating over all even indices
if input_list[i] > input_list[i + 1]:
lowercase_ , lowercase_ : Union[str, Any] = input_list[i + 1], input_list[i]
# swapping if elements not in order
lowercase_ : Any = False
for i in range(1 , len(_UpperCamelCase ) - 1 , 2 ): # iterating over all odd indices
if input_list[i] > input_list[i + 1]:
lowercase_ , lowercase_ : Tuple = input_list[i + 1], input_list[i]
# swapping if elements not in order
lowercase_ : List[Any] = False
return input_list
if __name__ == "__main__":
print('Enter list to be sorted')
UpperCamelCase__ = [int(x) for x in input().split()]
# inputing elements of the list in one line
UpperCamelCase__ = odd_even_sort(input_list)
print('The sorted list is')
print(sorted_list)
| 640
| 0
|
'''simple docstring'''
import math
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : List[Any] = []
lowercase_ : Any = 2
lowercase_ : List[str] = int(math.sqrt(UpperCAmelCase__ ) ) # Size of every segment
lowercase_ : int = [True] * (end + 1)
lowercase_ : Dict = []
while start <= end:
if temp[start] is True:
in_prime.append(UpperCAmelCase__ )
for i in range(start * start , end + 1 , UpperCAmelCase__ ):
lowercase_ : Optional[int] = False
start += 1
prime += in_prime
lowercase_ : str = end + 1
lowercase_ : Any = min(2 * end , UpperCAmelCase__ )
while low <= n:
lowercase_ : int = [True] * (high - low + 1)
for each in in_prime:
lowercase_ : Union[str, Any] = math.floor(low / each ) * each
if t < low:
t += each
for j in range(UpperCAmelCase__ , high + 1 , UpperCAmelCase__ ):
lowercase_ : List[Any] = False
for j in range(len(UpperCAmelCase__ ) ):
if temp[j] is True:
prime.append(j + low )
lowercase_ : int = high + 1
lowercase_ : Optional[Any] = min(high + end , UpperCAmelCase__ )
return prime
print(sieve(10**6))
| 714
|
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Dict = 0
lowercase_ : Optional[Any] = len(_UpperCamelCase ) # No of vertices in graph
lowercase_ : Union[str, Any] = [0] * n
lowercase_ : Optional[int] = [False] * n
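# DFS assigns each vertex a discovery id; low[v] tracks the smallest id
# reachable from v's subtree through at most one back edge. A tree edge
# (at, to) is a bridge when to's subtree cannot reach at or anything above it.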
def dfs(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
lowercase_ : Union[str, Any] = True
lowercase_ : Dict = id_
id_ += 1
for to in graph[at]:
if to == parent:
pass
elif not visited[to]:
dfs(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , id_ )
lowercase_ : str = min(low[at] , low[to] )
if id_ <= low[to]:
bridges.append((at, to) if at < to else (to, at) )
else:
# This edge is a back edge and cannot be a bridge
lowercase_ : Optional[int] = min(low[at] , low[to] )
lowercase_ : list[tuple[int, int]] = []
for i in range(_UpperCamelCase ):
if not visited[i]:
dfs(_UpperCamelCase , -1 , _UpperCamelCase , id_ )
return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
| 640
| 0
|
'''simple docstring'''
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return (_UpperCamelCase["data"], _UpperCamelCase["target"])
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Optional[Any] = XGBClassifier()
classifier.fit(UpperCAmelCase__ , UpperCAmelCase__ )
return classifier
def __SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
lowercase_ : Union[str, Any] = load_iris()
lowercase_ , lowercase_ : int = data_handling(UpperCAmelCase__ )
lowercase_ , lowercase_ , lowercase_ , lowercase_ : Union[str, Any] = train_test_split(
UpperCAmelCase__ , UpperCAmelCase__ , test_size=0.25 )
lowercase_ : List[str] = iris["target_names"]
# Create an XGBoost Classifier from the training data
lowercase_ : Any = xgboost(UpperCAmelCase__ , UpperCAmelCase__ )
# Display the confusion matrix of the classifier with both training and test sets
ConfusionMatrixDisplay.from_estimator(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , display_labels=UpperCAmelCase__ , cmap="Blues" , normalize="true" , )
plt.title("Normalized Confusion Matrix - IRIS Dataset" )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 715
|
'''simple docstring'''
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
UpperCamelCase__ = 'scheduler_config.json'
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: int = 1
__lowerCamelCase: List[Any] = 2
__lowerCamelCase: Optional[Any] = 3
__lowerCamelCase: int = 4
__lowerCamelCase: Optional[int] = 5
@dataclass
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: jnp.ndarray
class _UpperCAmelCase :
__lowerCamelCase: List[str] = SCHEDULER_CONFIG_NAME
__lowerCamelCase: Optional[int] = ['dtype']
__lowerCamelCase: int = []
__lowerCamelCase: Dict = True
@classmethod
def lowerCAmelCase__ ( cls : Tuple , a : Dict[str, Any] = None , a : Optional[str] = None , a : Union[str, Any]=False , **a : Union[str, Any] , ):
'''simple docstring'''
lowercase_ , lowercase_ : Any = cls.load_config(
pretrained_model_name_or_path=a , subfolder=a , return_unused_kwargs=a , **a , )
lowercase_ , lowercase_ : Union[str, Any] = cls.from_config(a , return_unused_kwargs=a , **a )
if hasattr(a , "create_state" ) and getattr(a , "has_state" , a ):
lowercase_ : Tuple = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def lowerCAmelCase__ ( self : int , a : Union[str, os.PathLike] , a : bool = False , **a : int ):
'''simple docstring'''
self.save_config(save_directory=a , push_to_hub=a , **a )
@property
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
return self._get_compatibles()
@classmethod
def lowerCAmelCase__ ( cls : Dict ):
'''simple docstring'''
lowercase_ : str = list(set([cls.__name__] + cls._compatibles ) )
lowercase_ : str = importlib.import_module(__name__.split("." )[0] )
lowercase_ : Optional[Any] = [
getattr(a , a ) for c in compatible_classes_str if hasattr(a , a )
]
return compatible_classes
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
assert len(_UpperCamelCase ) >= x.ndim
return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(_UpperCamelCase ) - x.ndim) ) , _UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase=0.999 , _UpperCamelCase=jnp.floataa ):
"""simple docstring"""
def alpha_bar(_UpperCamelCase ):
return math.cos((_UpperCamelCase + 0.008) / 1.008 * math.pi / 2 ) ** 2
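# Discretize the continuous squared-cosine alpha_bar curve into per-step
# betas, clipping each beta at the 0.999 default to avoid a singularity near t = 1.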
lowercase_ : int = []
for i in range(_UpperCamelCase ):
lowercase_ : Union[str, Any] = i / num_diffusion_timesteps
lowercase_ : Dict = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(_UpperCamelCase ) / alpha_bar(_UpperCamelCase ) , _UpperCamelCase ) )
return jnp.array(_UpperCamelCase , dtype=_UpperCamelCase )
@flax.struct.dataclass
class _UpperCAmelCase :
__lowerCamelCase: jnp.ndarray
__lowerCamelCase: jnp.ndarray
__lowerCamelCase: jnp.ndarray
@classmethod
def lowerCAmelCase__ ( cls : Tuple , a : Optional[int] ):
'''simple docstring'''
lowercase_ : Any = scheduler.config
if config.trained_betas is not None:
lowercase_ : Union[str, Any] = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
elif config.beta_schedule == "linear":
lowercase_ : List[Any] = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
lowercase_ : Tuple = (
jnp.linspace(
config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
lowercase_ : Union[str, Any] = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
else:
raise NotImplementedError(
f"""beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}""" )
lowercase_ : str = 1.0 - betas
lowercase_ : Dict = jnp.cumprod(a , axis=0 )
return cls(
alphas=a , betas=a , alphas_cumprod=a , )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : List[str] = state.alphas_cumprod
lowercase_ : Optional[Any] = alphas_cumprod[timesteps] ** 0.5
lowercase_ : int = sqrt_alpha_prod.flatten()
lowercase_ : Union[str, Any] = broadcast_to_shape_from_left(_UpperCamelCase , original_samples.shape )
lowercase_ : Optional[int] = (1 - alphas_cumprod[timesteps]) ** 0.5
lowercase_ : Union[str, Any] = sqrt_one_minus_alpha_prod.flatten()
lowercase_ : Any = broadcast_to_shape_from_left(_UpperCamelCase , original_samples.shape )
return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
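# add_noise implements the forward diffusion step
# x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps, while
# get_velocity returns the v-prediction target
# sqrt(alpha_bar_t) * eps - sqrt(1 - alpha_bar_t) * x_0.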
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ , lowercase_ : int = get_sqrt_alpha_prod(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
lowercase_ : str = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ , lowercase_ : int = get_sqrt_alpha_prod(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
lowercase_ : Any = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
return velocity
| 640
| 0
|
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class _UpperCAmelCase ( snake_case ):
@staticmethod
@abstractmethod
def lowerCAmelCase__ ( a : str ):
'''simple docstring'''
raise NotImplementedError()
@abstractmethod
def lowerCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
raise NotImplementedError()
| 716
|
'''simple docstring'''
import heapq
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : list[list] = []
# for each node and his adjacency list add them and the rank of the node to queue
# using heapq module the queue will be filled like a Priority Queue
# heapq works with a min priority queue, so I used -1*len(v) to build it
for key, value in graph.items():
# O(log(n))
heapq.heappush(_UpperCamelCase , [-1 * len(_UpperCamelCase ), (key, value)] )
# chosen_vertices = set of chosen vertices
lowercase_ : Optional[Any] = set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the negated degree of the highest-degree node)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
lowercase_ : Any = heapq.heappop(_UpperCamelCase )[1][0]
chosen_vertices.add(_UpperCamelCase )
# Remove all arcs adjacent to argmax
for elem in queue:
# if v has no adjacent nodes left, skip it
if elem[0] == 0:
continue
# if argmax is reachable from elem
# remove argmax from elem's adjacent list and update his rank
if argmax in elem[1][1]:
lowercase_ : str = elem[1][1].index(_UpperCamelCase )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(_UpperCamelCase )
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase__ = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 640
| 0
|
'''simple docstring'''
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
if isinstance(_UpperCamelCase , collections.abc.Iterable ):
return x
return (x, x)
@require_flax
class _UpperCAmelCase :
def lowerCAmelCase__ ( self : Tuple , a : int , a : Dict ):
'''simple docstring'''
pass
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
pass
def lowerCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
pass
def lowerCAmelCase__ ( self : Optional[Any] , a : List[Any] , a : Optional[Any] , a : List[str] ):
'''simple docstring'''
lowercase_ : Optional[int] = np.abs((a - b) ).max()
self.assertLessEqual(a , a , f"""Difference between torch and flax is {diff} (>= {tol}).""" )
def lowerCAmelCase__ ( self : Any , a : Union[str, Any] , a : Optional[Any] , a : List[Any] , a : int , a : Optional[int]=None , **a : str ):
'''simple docstring'''
lowercase_ : Union[str, Any] = VisionTextDualEncoderConfig.from_vision_text_configs(a , a )
lowercase_ : str = FlaxVisionTextDualEncoderModel(a )
lowercase_ : int = model(input_ids=a , pixel_values=a , attention_mask=a )
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], config.projection_dim) )
def lowerCAmelCase__ ( self : Optional[int] , a : Optional[Any] , a : Dict , a : Dict , a : Optional[Any] , a : List[Any]=None , **a : Tuple ):
'''simple docstring'''
lowercase_ , lowercase_ : List[str] = self.get_vision_text_model(a , a )
lowercase_ : str = {"vision_model": vision_model, "text_model": text_model}
lowercase_ : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**a )
lowercase_ : Tuple = model(input_ids=a , pixel_values=a , attention_mask=a )
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) )
def lowerCAmelCase__ ( self : Tuple , a : List[Any] , a : Dict , a : Tuple , a : Optional[Any] , a : Any=None , **a : Optional[int] ):
'''simple docstring'''
lowercase_ , lowercase_ : List[str] = self.get_vision_text_model(a , a )
lowercase_ : Optional[int] = {"vision_model": vision_model, "text_model": text_model}
lowercase_ : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**a )
lowercase_ : Union[str, Any] = model(input_ids=a , pixel_values=a , attention_mask=a )
lowercase_ : Any = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(a )
lowercase_ : Optional[int] = FlaxVisionTextDualEncoderModel.from_pretrained(a )
lowercase_ : List[str] = model(input_ids=a , pixel_values=a , attention_mask=a )
lowercase_ : List[Any] = after_output[0]
lowercase_ : Union[str, Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(a , 1e-3 )
def lowerCAmelCase__ ( self : Union[str, Any] , a : Optional[Any] , a : str , a : str , a : Union[str, Any] , a : Dict=None , **a : Dict ):
'''simple docstring'''
lowercase_ , lowercase_ : Any = self.get_vision_text_model(a , a )
lowercase_ : Union[str, Any] = {"vision_model": vision_model, "text_model": text_model}
lowercase_ : Optional[int] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**a )
lowercase_ : int = model(
input_ids=a , pixel_values=a , attention_mask=a , output_attentions=a )
lowercase_ : Union[str, Any] = output.vision_model_output.attentions
self.assertEqual(len(a ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
lowercase_ : Optional[Any] = to_atuple(vision_model.config.image_size )
lowercase_ : str = to_atuple(vision_model.config.patch_size )
lowercase_ : Any = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
lowercase_ : Tuple = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
lowercase_ : Any = output.text_model_output.attentions
self.assertEqual(len(a ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def lowerCAmelCase__ ( self : Union[str, Any] , a : List[Any] , a : Tuple , a : List[str] ):
'''simple docstring'''
pt_model.to(a )
pt_model.eval()
# prepare inputs
lowercase_ : Dict = inputs_dict
lowercase_ : int = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
lowercase_ : Dict = pt_model(**a ).to_tuple()
lowercase_ : Any = fx_model(**a ).to_tuple()
self.assertEqual(len(a ) , len(a ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(a , pt_output.numpy() , 4e-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(a )
lowercase_ : Optional[int] = FlaxVisionTextDualEncoderModel.from_pretrained(a , from_pt=a )
lowercase_ : List[Any] = fx_model_loaded(**a ).to_tuple()
self.assertEqual(len(a ) , len(a ) , "Output lengths differ between Flax and PyTorch" )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(a , pt_output.numpy() , 4e-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(a )
lowercase_ : Any = VisionTextDualEncoderModel.from_pretrained(a , from_flax=a )
pt_model_loaded.to(a )
pt_model_loaded.eval()
with torch.no_grad():
lowercase_ : List[str] = pt_model_loaded(**a ).to_tuple()
self.assertEqual(len(a ) , len(a ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(a , pt_output_loaded.numpy() , 4e-2 )
def lowerCAmelCase__ ( self : int , a : str , a : Optional[int] , a : Optional[Any] ):
'''simple docstring'''
lowercase_ : str = VisionTextDualEncoderConfig.from_vision_text_configs(a , a )
lowercase_ : List[str] = VisionTextDualEncoderModel(a )
lowercase_ : List[Any] = FlaxVisionTextDualEncoderModel(a )
lowercase_ : Tuple = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , a )
lowercase_ : Union[str, Any] = fx_state
self.check_pt_flax_equivalence(a , a , a )
def lowerCAmelCase__ ( self : str , a : List[Any] , a : List[str] , a : Optional[int] ):
'''simple docstring'''
lowercase_ : Any = VisionTextDualEncoderConfig.from_vision_text_configs(a , a )
lowercase_ : Optional[int] = VisionTextDualEncoderModel(a )
lowercase_ : Union[str, Any] = FlaxVisionTextDualEncoderModel(a )
lowercase_ : Any = load_flax_weights_in_pytorch_model(a , fx_model.params )
self.check_pt_flax_equivalence(a , a , a )
def lowerCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ : str = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**a )
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
lowercase_ : Union[str, Any] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**a )
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
lowercase_ : Optional[int] = self.prepare_config_and_inputs()
self.check_save_load(**a )
def lowerCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ : Tuple = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**a )
@is_pt_flax_cross_test
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ : str = self.prepare_config_and_inputs()
lowercase_ : str = config_inputs_dict.pop("vision_config" )
lowercase_ : Tuple = config_inputs_dict.pop("text_config" )
lowercase_ : Dict = config_inputs_dict
self.check_equivalence_pt_to_flax(a , a , a )
self.check_equivalence_flax_to_pt(a , a , a )
@slow
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ , lowercase_ : Any = self.get_pretrained_model_and_inputs()
lowercase_ : List[str] = model_a(**a )
lowercase_ : Optional[Any] = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(a )
lowercase_ : Optional[Any] = FlaxVisionTextDualEncoderModel.from_pretrained(a )
lowercase_ : int = model_a(**a )
lowercase_ : List[str] = after_outputs[0]
lowercase_ : str = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(a , 1e-5 )
@require_flax
class _UpperCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
def lowerCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ : List[str] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-vit" , "hf-internal-testing/tiny-bert" , vision_from_pt=a , text_from_pt=a , )
lowercase_ : Optional[int] = 1_3
lowercase_ : List[Any] = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
lowercase_ : List[str] = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
lowercase_ : List[str] = random_attention_mask([batch_size, 4] )
lowercase_ : str = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def lowerCAmelCase__ ( self : List[str] , a : Optional[Any] , a : Optional[int] ):
'''simple docstring'''
lowercase_ : Dict = FlaxViTModel(a )
lowercase_ : Optional[Any] = FlaxBertModel(a )
return vision_model, text_model
def lowerCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ : Optional[int] = FlaxViTModelTester(self )
lowercase_ : str = FlaxBertModelTester(self )
lowercase_ : Optional[Any] = vit_model_tester.prepare_config_and_inputs()
lowercase_ : Any = bert_model_tester.prepare_config_and_inputs()
lowercase_ , lowercase_ : Optional[Any] = vision_config_and_inputs
lowercase_ , lowercase_ , lowercase_ , lowercase_ : Tuple = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class _UpperCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
lowercase_ : Tuple = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-clip" , "hf-internal-testing/tiny-bert" , vision_from_pt=a , text_from_pt=a , )
lowercase_ : int = 1_3
lowercase_ : Any = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
lowercase_ : Optional[Any] = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
lowercase_ : str = random_attention_mask([batch_size, 4] )
lowercase_ : Optional[int] = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def lowerCAmelCase__ ( self : Any , a : int , a : Tuple ):
'''simple docstring'''
lowercase_ : Optional[Any] = FlaxCLIPVisionModel(a )
lowercase_ : Optional[int] = FlaxBertModel(a )
return vision_model, text_model
def lowerCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
lowercase_ : Tuple = FlaxCLIPVisionModelTester(self )
lowercase_ : int = FlaxBertModelTester(self )
lowercase_ : Dict = clip_model_tester.prepare_config_and_inputs()
lowercase_ : Tuple = bert_model_tester.prepare_config_and_inputs()
lowercase_ , lowercase_ : List[str] = vision_config_and_inputs
lowercase_ , lowercase_ , lowercase_ , lowercase_ : Optional[Any] = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
@slow
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
lowercase_ : Optional[Any] = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian" , logit_scale_init_value=1.0 )
lowercase_ : Optional[Any] = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian" )
lowercase_ : Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
lowercase_ : Dict = processor(
text=["una foto di un gatto", "una foto di un cane"] , images=a , padding=a , return_tensors="np" )
lowercase_ : Tuple = model(**a )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
lowercase_ : List[str] = np.array([[1.228_4727, 0.310_4122]] )
self.assertTrue(np.allclose(outputs.logits_per_image , a , atol=1e-3 ) )
| 717
|
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
'pipelines_utils',
'0.22.0',
'Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.',
standard_warn=False,
stacklevel=3,
)
| 640
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
UpperCamelCase__ = {
"configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
"processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
"TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrOCRForCausalLM",
"TrOCRPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 718
|
'''simple docstring'''
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
UpperCamelCase__ = {
'cola': 2,
'mnli': 3,
'mrpc': 2,
'sst-2': 2,
'sts-b': 1,
'qqp': 2,
'qnli': 2,
'rte': 2,
'wnli': 2,
}
logging.set_verbosity_info()
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None ):
"""simple docstring"""
lowercase_ : Tuple = XLNetConfig.from_json_file(_UpperCamelCase )
lowercase_ : Union[str, Any] = finetuning_task.lower() if finetuning_task is not None else ""
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(F"""Building PyTorch XLNetForSequenceClassification model from configuration: {config}""" )
lowercase_ : Dict = finetuning_task
lowercase_ : Any = GLUE_TASKS_NUM_LABELS[finetuning_task]
lowercase_ : Any = XLNetForSequenceClassification(_UpperCamelCase )
elif "squad" in finetuning_task:
lowercase_ : Optional[int] = finetuning_task
lowercase_ : Optional[int] = XLNetForQuestionAnswering(_UpperCamelCase )
else:
lowercase_ : Union[str, Any] = XLNetLMHeadModel(_UpperCamelCase )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save pytorch-model
lowercase_ : Optional[Any] = os.path.join(_UpperCamelCase , _UpperCamelCase )
lowercase_ : Dict = os.path.join(_UpperCamelCase , _UpperCamelCase )
print(F"""Save PyTorch model to {os.path.abspath(_UpperCamelCase )}""" )
torch.save(model.state_dict() , _UpperCamelCase )
print(F"""Save configuration file to {os.path.abspath(_UpperCamelCase )}""" )
with open(_UpperCamelCase , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--xlnet_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained XLNet model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--finetuning_task',
default=None,
type=str,
help='Name of a task on which the XLNet TensorFlow model was fine-tuned',
)
UpperCamelCase__ = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| 640
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import _LazyModule
UpperCamelCase__ = {'tokenization_tapex': ['TapexTokenizer']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 719
|
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
if number < 0:
raise ValueError("number must not be negative" )
return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 640
| 0
|
'''simple docstring'''
import argparse
import os
import re
UpperCamelCase__ = """src/transformers/models/auto"""
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
UpperCamelCase__ = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
# re pattern that matches identifiers in mappings
UpperCamelCase__ = re.compile(r'\s*\(\s*\"(\S[^\"]+)\"')
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase = False ):
"""simple docstring"""
with open(UpperCamelCase__ , "r" , encoding="utf-8" ) as f:
lowercase_ : Optional[int] = f.read()
lowercase_ : int = content.split("\n" )
lowercase_ : str = []
lowercase_ : List[Any] = 0
while line_idx < len(UpperCamelCase__ ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
lowercase_ : Any = len(re.search(R"^(\s*)\S" , lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(" " * indent + "(" ):
new_lines.append(lines[line_idx] )
line_idx += 1
lowercase_ : Any = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
lowercase_ : List[Any] = line_idx
while not lines[line_idx].startswith(" " * indent + ")" ):
line_idx += 1
blocks.append("\n".join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
lowercase_ : List[Any] = sorted(UpperCamelCase__ , key=lambda _UpperCamelCase : _re_identifier.search(UpperCamelCase__ ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(UpperCamelCase__ , "w" , encoding="utf-8" ) as f:
f.write("\n".join(UpperCamelCase__ ) )
elif "\n".join(UpperCamelCase__ ) != content:
return True
def sort_all_auto_mappings(overwrite = False ):
    """simple docstring"""
    fnames = [os.path.join(PATH_TO_AUTO_MODULE , f ) for f in os.listdir(PATH_TO_AUTO_MODULE ) if f.endswith(".py" )]
    diffs = [sort_auto_mapping(fname , overwrite=overwrite ) for fname in fnames]
    if not overwrite and any(diffs ):
        failures = [f for f, d in zip(fnames , diffs ) if d]
        raise ValueError(
            F"""The following files have auto mappings that need sorting: {", ".join(failures )}. Run `make style` to fix"""
            " this." )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
    args = parser.parse_args()
    sort_all_auto_mappings(not args.check_only)
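# Illustrative before/after (added sketch; the entries are hypothetical) of what
# sort_auto_mapping does to a mapping block, ordering entries by the first
# quoted identifier:
#     before:  ("bert", "BertConfig"),  ("albert", "AlbertConfig"),
#     after:   ("albert", "AlbertConfig"),  ("bert", "BertConfig"),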
| 720
|
'''simple docstring'''
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
    from transformers.models.plbart.modeling_plbart import shift_tokens_right
EN_CODE = 50003
PYTHON_CODE = 50002
@require_sentencepiece
@require_tokenizers
class PLBartTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = PLBartTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB , language_codes="base" , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB , language_codes="base" , keep_accents=True )
        tokens = tokenizer.tokenize("This is a test" )
        self.assertListEqual(tokens , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
lowercase_ : List[str] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x ) for x in range(end - 4 , end )]
        self.assertListEqual(language_tokens , ["__java__", "__python__", "__en_XX__", "<mask>"] )
        code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        input_ids = tokenizer(code ).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids , skip_special_tokens=True , clean_up_tokenization_spaces=True ) , code , )
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
lowercase_ : str = PLBartTokenizer(a , language_codes="multi" , keep_accents=a )
lowercase_ : List[str] = tokenizer.tokenize("This is a test" )
self.assertListEqual(a , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
lowercase_ : Optional[Any] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x ) for x in range(end - 7 , end )]
        self.assertListEqual(
            language_tokens , ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"] )
        code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        input_ids = tokenizer(code ).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids , skip_special_tokens=True , clean_up_tokenization_spaces=True ) , code , )
@require_torch
@require_sentencepiece
@require_tokenizers
class PLBartPythonEnIntegrationTest ( unittest.TestCase ):
    checkpoint_name = 'uclanlp/plbart-python-en_XX'
    src_text = [
'def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])',
'def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])',
]
    tgt_text = [
'Returns the maximum value of a b c.',
'Sums the values of a b c.',
]
    expected_src_tokens = [
134,
5452,
3_3460,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
988,
20,
3_3456,
19,
3_3456,
771,
39,
4258,
889,
3318,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
2471,
2,
PYTHON_CODE,
]
    @classmethod
    def setUpClass( cls ):
        '''simple docstring'''
        cls.tokenizer : PLBartTokenizer = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name , language_codes="base" , src_lang="python" , tgt_lang="en_XX" )
        cls.pad_token_id = 1
        return cls
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"] , 5_0_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"] , 5_0_0_0_2 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"] , 5_0_0_0_3 )
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
        ids = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , ids )
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
        self.assertIn(EN_CODE , self.tokenizer.all_special_ids )
        generated_ids = [EN_CODE, 9_0_3_7, 3_3_4_4_2, 5_7, 7_5_2, 1_5_3, 1_4, 5_6, 1_8, 9, 2]
        result = self.tokenizer.decode(generated_ids , skip_special_tokens=True )
        expected_english = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=True )
        self.assertEqual(result , expected_english )
        self.assertNotIn(self.tokenizer.eos_token , result )
def lowerCAmelCase__ ( self : Dict ):
'''simple docstring'''
lowercase_ : Optional[int] = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 2_0]
self.assertIsInstance(src_text[0] , a )
lowercase_ : Tuple = 1_0
lowercase_ : int = self.tokenizer(a , max_length=a , truncation=a ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , a )
self.assertEqual(len(a ) , a )
def lowerCAmelCase__ ( self : Dict ):
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"] ) , [5_0_0_0_4, 5_0_0_0_1] )
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname )
        new_tok = PLBartTokenizer.from_pretrained(tmpdirname )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids , original_special_tokens )
@require_torch
def lowerCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
        batch = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=True , return_tensors="pt" )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
        self.assertEqual(batch.decoder_input_ids[1][0] , EN_CODE )
        self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
        self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
        batch = self.tokenizer(
            self.src_text , text_target=self.tgt_text , padding=True , truncation=True , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
        self.assertIsInstance(batch , BatchEncoding )
        self.assertEqual((2, 2_6) , batch.input_ids.shape )
        self.assertEqual((2, 2_6) , batch.attention_mask.shape )
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens , result )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
        batch = self.tokenizer(self.src_text , padding=True , truncation=True , max_length=3 , return_tensors="pt" )
        targets = self.tokenizer(
            text_target=self.tgt_text , padding=True , truncation=True , max_length=1_0 , return_tensors="pt" )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels , self.tokenizer.pad_token_id )
        self.assertEqual(batch.input_ids.shape[1] , 3 )
        self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )
@require_torch
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
        inputs = self.tokenizer._build_translation_inputs(
            "A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="java" )
        self.assertEqual(
            nested_simplify(inputs ) , {
# A, test, EOS, en_XX
"input_ids": [[1_5_0, 2_4_2, 2, 5_0_0_0_3]],
"attention_mask": [[1, 1, 1, 1]],
# java
"forced_bos_token_id": 5_0_0_0_1,
} , )
| 640
| 0
|
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
TOKENIZER_CHECKPOINTS = ['gpt2']
TINY_MODEL_CHECKPOINT = 'gpt2'
if is_tf_available():
    class ModelToSave(tf.Module ):
        def __init__( self , tokenizer ):
            '''simple docstring'''
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT )
            self.model = TFGPTaLMHeadModel.from_config(config )
@tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name="text" ),) )
def lowerCAmelCase__ ( self : Dict , a : Optional[int] ):
'''simple docstring'''
lowercase_ : str = self.tokenizer(a )
lowercase_ : str = tokenized["input_ids"].to_tensor()
lowercase_ : Union[str, Any] = tf.cast(input_ids_dense > 0 , tf.intaa )
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
lowercase_ : Optional[Any] = self.model(input_ids=a , attention_mask=a )["logits"]
return outputs
@require_tf
@require_keras_nlp
class _UpperCAmelCase ( unittest.TestCase ):
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
super().setUp()
        self.tokenizers = [GPTaTokenizer.from_pretrained(checkpoint ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPTaTokenizer.from_pretrained(checkpoint ) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers ) == len(self.tf_tokenizers )
        self.test_sentences = [
"This is a straightforward English test sentence.",
"This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
"Now we\'re going to add some Chinese: 一 二 三 一二三",
"And some much more rare Chinese: 齉 堃 齉堃",
"Je vais aussi écrire en français pour tester les accents",
"Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
]
        self.paired_sentences = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def lowerCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs] , return_tensors="tf" )
                tf_outputs = tf_tokenizer([test_inputs] )
                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()
                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
                    # cast so integer dtypes match before comparing the values
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values , tf.int64 ) == tf_outputs_values ) )
@slow
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer )
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs )
                compiled_outputs = compiled_tokenizer(test_inputs )
                eager_outputs = tf_tokenizer(test_inputs )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer )
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]] )
            out = model.serving(test_inputs ) # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir ) / "saved.model"
                tf.saved_model.save(model , save_path , signatures={"serving_default": model.serving} )
                loaded_model = tf.saved_model.load(save_path )
                loaded_output = loaded_model.signatures["serving_default"](test_inputs )["output_0"]
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]] )
            out = tf_tokenizer(test_inputs ) # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPTaTokenizer.from_config(config )
            from_config_output = model_from_config(test_inputs )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
            tf_tokenizer.model_max_length = 1_2_3_1_2_3
            for max_length in [3, 5, 1_0_2_4]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]] )
                out = tf_tokenizer(test_inputs , max_length=max_length )
                out_length = out["input_ids"].numpy().shape[1]
assert out_length == max_length
| 721
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_encodec'] = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 640
| 0
|
'''simple docstring'''
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
class HyperParamSearchBackendBase :
    name: str
    pip_package: str = None
    @staticmethod
    def is_available( ):
        '''simple docstring'''
        raise NotImplementedError
    def run( self , trainer , n_trials : int , direction : str , **kwargs ):
        '''simple docstring'''
        raise NotImplementedError
    def default_hp_space( self , trial ):
        '''simple docstring'''
        raise NotImplementedError
    def ensure_available( self ):
        '''simple docstring'''
        if not self.is_available():
            raise RuntimeError(
                f"""You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.""" )
    @classmethod
    def pip_install( cls ):
        '''simple docstring'''
        return f"""`pip install {cls.pip_package or cls.name}`"""
class OptunaBackend(HyperParamSearchBackendBase ):
    name = 'optuna'
    @staticmethod
    def is_available( ):
        '''simple docstring'''
        return is_optuna_available()
    def run( self , trainer , n_trials : int , direction : str , **kwargs ):
        '''simple docstring'''
        return run_hp_search_optuna(trainer , n_trials , direction , **kwargs )
    def default_hp_space( self , trial ):
        '''simple docstring'''
        return default_hp_space_optuna(trial )
class RayTuneBackend(HyperParamSearchBackendBase ):
    name = 'ray'
    pip_package = '\'ray[tune]\''
    @staticmethod
    def is_available( ):
        '''simple docstring'''
        return is_ray_available()
    def run( self , trainer , n_trials : int , direction : str , **kwargs ):
        '''simple docstring'''
        return run_hp_search_ray(trainer , n_trials , direction , **kwargs )
    def default_hp_space( self , trial ):
        '''simple docstring'''
        return default_hp_space_ray(trial )
class SigOptBackend(HyperParamSearchBackendBase ):
    name = 'sigopt'
    @staticmethod
    def is_available( ):
        '''simple docstring'''
        return is_sigopt_available()
    def run( self , trainer , n_trials : int , direction : str , **kwargs ):
        '''simple docstring'''
        return run_hp_search_sigopt(trainer , n_trials , direction , **kwargs )
    def default_hp_space( self , trial ):
        '''simple docstring'''
        return default_hp_space_sigopt(trial )
class WandbBackend(HyperParamSearchBackendBase ):
    name = 'wandb'
    @staticmethod
    def is_available( ):
        '''simple docstring'''
        return is_wandb_available()
    def run( self , trainer , n_trials : int , direction : str , **kwargs ):
        '''simple docstring'''
        return run_hp_search_wandb(trainer , n_trials , direction , **kwargs )
    def default_hp_space( self , trial ):
        '''simple docstring'''
        return default_hp_space_wandb(trial )
ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def default_hp_search_backend():
    """simple docstring"""
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends ) > 0:
        name = available_backends[0].name
        if len(available_backends ) > 1:
            logger.info(
                f"""{len(available_backends )} hyperparameter search backends available. Using {name} as the default.""" )
        return name
raise RuntimeError(
"No hyperparameter search backend available.\n"
+ "\n".join(
F""" - To install {backend.name} run {backend.pip_install()}"""
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
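# Hedged usage sketch (added for illustration; this mirrors roughly how
# Trainer.hyperparameter_search consumes the registry above, stated as an
# assumption rather than a verbatim copy of that code):
#     backend = HPSearchBackend(backend_name or default_hp_search_backend())
#     backend_obj = ALL_HYPERPARAMETER_SEARCH_BACKENDS[backend]()
#     backend_obj.ensure_available()
#     best_run = backend_obj.run(trainer, n_trials, direction, **kwargs)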
| 700
|
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
class SchedulerType(Enum ):
    LINEAR = 'linear'
    COSINE = 'cosine'
    COSINE_WITH_RESTARTS = 'cosine_with_restarts'
    POLYNOMIAL = 'polynomial'
    CONSTANT = 'constant'
    CONSTANT_WITH_WARMUP = 'constant_with_warmup'
    PIECEWISE_CONSTANT = 'piecewise_constant'
def get_constant_schedule(optimizer , last_epoch = -1 ):
    """simple docstring"""
    return LambdaLR(optimizer , lambda _ : 1 , last_epoch=last_epoch )
def get_constant_schedule_with_warmup(optimizer , num_warmup_steps , last_epoch = -1 ):
    """simple docstring"""
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1.0 , num_warmup_steps ) )
        return 1.0
    return LambdaLR(optimizer , lr_lambda , last_epoch=last_epoch )
def get_piecewise_constant_schedule(optimizer , step_rules , last_epoch = -1 ):
    """simple docstring"""
    rules_dict = {}
    rule_list = step_rules.split("," )
    for rule_str in rule_list[:-1]:
        value_str , steps_str = rule_str.split(":" )
        steps = int(steps_str )
        value = float(value_str )
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1] )
    def create_rules_function(rules_dict , last_lr_multiple ):
        def rule_func(steps ) -> float:
            sorted_steps = sorted(rules_dict.keys() )
            for i, sorted_step in enumerate(sorted_steps ):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple
        return rule_func
    rules_func = create_rules_function(rules_dict , last_lr_multiple )
    return LambdaLR(optimizer , rules_func , last_epoch=last_epoch )
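# Worked example (added note): the rule string is parsed as value:step pairs
# plus a trailing default, so step_rules="1:10,0.1:20,0.01" keeps the lr
# multiplier at 1.0 for steps below 10, 0.1 for steps below 20, and 0.01 after.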
def get_linear_schedule_with_warmup(optimizer , num_warmup_steps , num_training_steps , last_epoch=-1 ):
    """simple docstring"""
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        return max(
            0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
    return LambdaLR(optimizer , lr_lambda , last_epoch )
def get_cosine_schedule_with_warmup(optimizer , num_warmup_steps , num_training_steps , num_cycles = 0.5 , last_epoch = -1 ):
    """simple docstring"""
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        progress = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
        return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(num_cycles ) * 2.0 * progress )) )
    return LambdaLR(optimizer , lr_lambda , last_epoch )
def get_cosine_with_hard_restarts_schedule_with_warmup(optimizer , num_warmup_steps , num_training_steps , num_cycles = 1 , last_epoch = -1 ):
    """simple docstring"""
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        progress = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
        if progress >= 1.0:
            return 0.0
        return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles ) * progress) % 1.0) )) )
    return LambdaLR(optimizer , lr_lambda , last_epoch )
def get_polynomial_decay_schedule_with_warmup(optimizer , num_warmup_steps , num_training_steps , lr_end=1e-7 , power=1.0 , last_epoch=-1 ):
    """simple docstring"""
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(F"""lr_end ({lr_end}) must be smaller than initial lr ({lr_init})""" )
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init
    return LambdaLR(optimizer , lr_lambda , last_epoch )
TYPE_TO_SCHEDULER_FUNCTION = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(name , optimizer , step_rules = None , num_warmup_steps = None , num_training_steps = None , num_cycles = 1 , power = 1.0 , last_epoch = -1 , ):
    """simple docstring"""
    name = SchedulerType(name )
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer , last_epoch=last_epoch )
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer , step_rules=step_rules , last_epoch=last_epoch )
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(F"""{name} requires `num_warmup_steps`, please provide that argument.""" )
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer , num_warmup_steps=num_warmup_steps , last_epoch=last_epoch )
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(F"""{name} requires `num_training_steps`, please provide that argument.""" )
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , num_cycles=num_cycles , last_epoch=last_epoch , )
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , power=power , last_epoch=last_epoch , )
    return schedule_func(
        optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , last_epoch=last_epoch )
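# Hedged usage sketch (added; illustrative only, since this module's relative
# imports keep it from running as a standalone script): request a linear
# warmup/decay schedule by name and step it once per optimizer update.
#     import torch
#     model = torch.nn.Linear(4, 4)
#     optim = torch.optim.AdamW(model.parameters(), lr=1e-3)
#     sched = get_scheduler("linear", optim, num_warmup_steps=100, num_training_steps=1000)
#     for _ in range(1000):
#         optim.step()
#         sched.step()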
| 640
| 0
|
'''simple docstring'''
import operator as op
def solve(post_fix ):
    """simple docstring"""
    stack = []
    div = lambda x , y : int(x / y )  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation
    # print table header
    print("Symbol".center(8 ) , "Action".center(12 ) , "Stack" , sep=" | " )
    print("-" * (30 + len(post_fix )) )
    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x )  # append x to stack
            # output in tabular format
            print(x.rjust(8 ) , ("push(" + x + ")").ljust(12 ) , ",".join(stack ) , sep=" | " )
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8 ) , ("pop(" + b + ")").ljust(12 ) , ",".join(stack ) , sep=" | " )
            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8 ) , ("pop(" + a + ")").ljust(12 ) , ",".join(stack ) , sep=" | " )
            stack.append(
                str(opr[x](int(a ) , int(b ) ) ) )  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8 ) , ("push(" + a + x + b + ")").ljust(12 ) , ",".join(stack ) , sep=" | " , )
    return int(stack[0] )
if __name__ == "__main__":
    Postfix = input('\n\nEnter a Postfix Equation (space separated) = ').split(' ')
    print('\n\tResult = ', solve(Postfix))
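    # Added worked note (a sketch): for the postfix input "5 6 9 * +" the solver
    # pops 6 and 9 for "*" (54), then 5 and 54 for "+", printing a result of 59.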
| 701
|
'''simple docstring'''
def solution(pence = 200 ):
    """simple docstring"""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin , pence + 1 , 1 ):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 73682
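    # Added worked check (a sketch): 5 pence can be made in 4 ways from these
    # coins: 5, 2+2+1, 2+1+1+1, and 1+1+1+1+1.
    assert solution(5) == 4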
| 640
| 0
|
'''simple docstring'''
from math import isclose, sqrt
def next_point(point_x , point_y , incoming_gradient ):
    """simple docstring"""
    normal_gradient = point_y / 4 / point_x
    sa = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    ca = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100
    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
    ) / (2 * quadratic_term)
    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus , point_x ) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)
    return next_x, next_y, outgoing_gradient
def solution(first_x_coord = 1.4 , first_y_coord = -9.6 ):
    """simple docstring"""
    num_reflections : int = 0
    point_x : float = first_x_coord
    point_y : float = first_y_coord
    gradient : float = (10.1 - point_y) / (0.0 - point_x)
    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x , point_y , gradient = next_point(point_x , point_y , gradient )
        num_reflections += 1
    return num_reflections
if __name__ == "__main__":
print(f"""{solution() = }""")
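    # Added note (a sketch, stated from the published Project Euler 144 result):
    # the beam enters through the gap at (0.0, 10.1), first strikes (1.4, -9.6),
    # and the loop above counts 354 internal reflections before it escapes.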
| 702
|
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader ):
    def __init__( self , path_or_paths : NestedDataStructureLike[PathLike] , split : Optional[NamedSplit] = None , features : Optional[Features] = None , cache_dir : str = None , keep_in_memory : bool = False , streaming : bool = False , field : Optional[str] = None , num_proc : Optional[int] = None , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            path_or_paths , split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , num_proc=num_proc , **kwargs , )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths , dict ) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir , data_files=path_or_paths , features=features , field=field , **kwargs , )
    def read( self ):
        '''simple docstring'''
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split=self.split , verification_mode=verification_mode , in_memory=self.keep_in_memory )
        return dataset
class JsonDatasetWriter :
    def __init__( self , dataset : Dataset , path_or_buf : Union[PathLike, BinaryIO] , batch_size : Optional[int] = None , num_proc : Optional[int] = None , **to_json_kwargs , ):
        '''simple docstring'''
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""" )
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs
    def write( self ):
        '''simple docstring'''
        _ = self.to_json_kwargs.pop("path_or_buf" , None )
        orient = self.to_json_kwargs.pop("orient" , "records" )
        lines = self.to_json_kwargs.pop("lines" , True if orient == "records" else False )
        index = self.to_json_kwargs.pop("index" , False if orient in ["split", "table"] else True )
        compression = self.to_json_kwargs.pop("compression" , None )
        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"""`datasets` currently does not support {compression} compression""" )
        if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
            with fsspec.open(self.path_or_buf , "wb" , compression=compression ) as buffer:
                written = self._write(file_obj=buffer , orient=orient , lines=lines , index=index , **self.to_json_kwargs )
        else:
            if compression:
                raise NotImplementedError(
                    f"""The compression parameter is not supported when writing to a buffer, but compression={compression}"""
                    " was passed. Please provide a local path instead." )
            written = self._write(
                file_obj=self.path_or_buf , orient=orient , lines=lines , index=index , **self.to_json_kwargs )
        return written
    def _batch_json( self , args ):
        '''simple docstring'''
        offset , orient , lines , index , to_json_kwargs = args
        batch = query_table(
            table=self.dataset.data , key=slice(offset , offset + self.batch_size ) , indices=self.dataset._indices , )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None , orient=orient , lines=lines , index=index , **to_json_kwargs )
        if not json_str.endswith("\n" ):
            json_str += "\n"
        return json_str.encode(self.encoding )
    def _write( self , file_obj : BinaryIO , orient , lines , index , **to_json_kwargs , ):
        '''simple docstring'''
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0 , len(self.dataset ) , self.batch_size ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating json from Arrow format" , ):
                json_bytes = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
                written += file_obj.write(json_bytes )
        else:
            num_rows , batch_size = len(self.dataset ), self.batch_size
            with multiprocessing.Pool(self.num_proc ) as pool:
                for json_bytes in logging.tqdm(
                    pool.imap(
                        self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , num_rows , batch_size )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating json from Arrow format" , ):
                    written += file_obj.write(json_bytes )
        return written
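# Hedged usage sketch (added; relies only on the public `datasets` API that the
# reader/writer above back — Dataset.to_json streams batches through
# _batch_json, and Dataset.from_json goes through JsonDatasetReader):
#     from datasets import Dataset
#     ds = Dataset.from_dict({"a": [1, 2, 3]})
#     ds.to_json("out.jsonl", lines=True)   # one JSON object per line
#     round_trip = Dataset.from_json("out.jsonl")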
| 640
| 0
|
'''simple docstring'''
from __future__ import annotations
def encode(plain ):
    """simple docstring"""
    return [ord(elem ) - 96 for elem in plain]
def decode(encoded ):
    """simple docstring"""
    return "".join(chr(elem + 96 ) for elem in encoded )
def main( ):
    """simple docstring"""
    encoded = encode(input("-> " ).strip().lower() )
    print("Encoded: " , encoded )
    print("Decoded:" , decode(encoded ) )
if __name__ == "__main__":
main()
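    # Added worked check (a sketch): letters map to their positions in the
    # alphabet, so encode("abc") == [1, 2, 3] and decode([8, 9]) == "hi".
    assert encode("abc") == [1, 2, 3]
    assert decode([8, 9]) == "hi"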
| 703
|
'''simple docstring'''
def largest_square_area_in_matrix_top_down_approach(rows , cols , mat ):
    """simple docstring"""
    def update_area_of_max_square(row , col ) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0
        right = update_area_of_max_square(row , col + 1 )
        diagonal = update_area_of_max_square(row + 1 , col + 1 )
        down = update_area_of_max_square(row + 1 , col )
        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down] )
            largest_square_area[0] = max(largest_square_area[0] , sub_problem_sol )
            return sub_problem_sol
        else:
            return 0
    largest_square_area = [0]
    update_area_of_max_square(0 , 0 )
    return largest_square_area[0]
def largest_square_area_in_matrix_top_down_approach_with_dp(rows , cols , mat ):
    """simple docstring"""
    def update_area_of_max_square_using_dp_array(
        row , col , dp_array ) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]
        right = update_area_of_max_square_using_dp_array(row , col + 1 , dp_array )
        diagonal = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , dp_array )
        down = update_area_of_max_square_using_dp_array(row + 1 , col , dp_array )
        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down] )
            largest_square_area[0] = max(largest_square_area[0] , sub_problem_sol )
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0
    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows )]
    update_area_of_max_square_using_dp_array(0 , 0 , dp_array )
    return largest_square_area[0]
def largest_square_area_in_matrix_bottom_up(rows , cols , mat ):
    """simple docstring"""
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1 )]
    largest_square_area = 0
    for row in range(rows - 1 , -1 , -1 ):
        for col in range(cols - 1 , -1 , -1 ):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]
            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right , diagonal , bottom )
                largest_square_area = max(dp_array[row][col] , largest_square_area )
            else:
                dp_array[row][col] = 0
    return largest_square_area
def largest_square_area_in_matrix_bottom_up_space_optimization(rows , cols , mat ):
    """simple docstring"""
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1 , -1 , -1 ):
        for col in range(cols - 1 , -1 , -1 ):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]
            if mat[row][col] == 1:
                current_row[col] = 1 + min(right , diagonal , bottom )
                largest_square_area = max(current_row[col] , largest_square_area )
            else:
                current_row[col] = 0
        next_row = current_row[:]  # copy, so the two rows stay independent
    return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
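    # Added worked check (a sketch): the largest all-ones square in the matrix
    # below has side 2 (e.g. the top-left 2x2 block).
    sample_mat = [[1, 1, 0], [1, 1, 1], [0, 1, 1]]
    assert largest_square_area_in_matrix_bottom_up(3, 3, sample_mat) == 2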
| 640
| 0
|
import heapq
def greedy_min_vertex_cover(graph ):
    """simple docstring"""
    queue : list[list] = []
    # for each node and its adjacency list, add them and the rank of the node to the queue
    # using the heapq module the queue will be filled like a priority queue
    # heapq works with a min priority queue, so -1 * len(v) is used to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue , [-1 * len(value ), (key, value)] )
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # while the queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract the vertex with max rank from the queue and add it to chosen_vertices
        argmax = heapq.heappop(queue )[1][0]
        chosen_vertices.add(argmax )
        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v has no adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem,
            # remove argmax from elem's adjacency list and update its rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax )
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue )
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
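    # Added worked note (a sketch): vertices are removed greedily by degree,
    # with ties broken by the heap's tuple comparison; for this graph the
    # printed cover is {0, 1, 2, 4}.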
| 704
|
'''simple docstring'''
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
UpperCamelCase__ = namedtuple(
'_TestCommandArgs',
[
'dataset',
'name',
'cache_dir',
'data_dir',
'all_configs',
'save_infos',
'ignore_verifications',
'force_redownload',
'clear_cache',
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_apercent_close(source , target ):
    """simple docstring"""
    return (abs(source - target ) / target) < 0.01
@pytest.mark.integration
def test_test_command(dataset_dir ):
    """simple docstring"""
    args = _TestCommandArgs(dataset=dataset_dir , all_configs=True , save_infos=True )
    test_command = TestCommand(*args )
    test_command.run()
    dataset_readme_path = os.path.join(dataset_dir , "README.md" )
    assert os.path.exists(dataset_readme_path )
    dataset_infos = DatasetInfosDict.from_directory(dataset_dir )
    expected_dataset_infos = DatasetInfosDict(
{
"default": DatasetInfo(
features=Features(
{
"tokens": Sequence(Value("string" ) ),
"ner_tags": Sequence(
ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] ) ),
"langs": Sequence(Value("string" ) ),
"spans": Sequence(Value("string" ) ),
} ) , splits=[
{
"name": "train",
"num_bytes": 235_1563,
"num_examples": 1_0000,
},
{
"name": "validation",
"num_bytes": 23_8418,
"num_examples": 1000,
},
] , download_size=394_0680 , dataset_size=258_9981 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result , expected = getattr(dataset_infos["default"] , key ), getattr(expected_dataset_infos["default"] , key )
        if key == "num_bytes":
            assert is_apercent_close(result , expected )
        elif key == "splits":
            assert list(result ) == list(expected )
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes )
        else:
            assert result == expected
| 640
| 0
|
'''simple docstring'''
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_tf_ops.py
REPO_PATH = '.'
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
'Assert',
'AssignVariableOp',
'EmptyTensorList',
'MergeV2Checkpoints',
'ReadVariableOp',
'ResourceGather',
'RestoreV2',
'SaveV2',
'ShardedFilename',
'StatefulPartitionedCall',
'StaticRegexFullMatch',
'VarHandleOp',
]
def onnx_compliancy(saved_model_path , strict , opset ):
    """simple docstring"""
    saved_model = SavedModel()
    onnx_ops = []
    with open(os.path.join(REPO_PATH , "utils" , "tf_ops" , "onnx.json" ) ) as f:
        onnx_opsets = json.load(f )["opsets"]
    for i in range(1 , opset + 1 ):
        onnx_ops.extend(onnx_opsets[str(i )] )
    with open(saved_model_path , "rb" ) as f:
        saved_model.ParseFromString(f.read() )
    model_op_names = set()
    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node )
        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def )
    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names )
    incompatible_ops = []
    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op )
    if strict and len(incompatible_ops ) > 0:
        raise Exception(F"""Found the following incompatible ops for the opset {opset}:\n""" + "\n".join(incompatible_ops ) )
    elif len(incompatible_ops ) > 0:
        print(F"""Found the following incompatible ops for the opset {opset}:""" )
        print(*incompatible_ops , sep="\n" )
    else:
        print(F"""The saved model {saved_model_path} can properly be converted with ONNX.""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--saved_model_path', help='Path of the saved model to check (the .pb file).')
parser.add_argument(
'--opset', default=12, type=int, help='The ONNX opset against which the model has to be tested.'
)
parser.add_argument(
'--framework', choices=['onnx'], default='onnx', help='Frameworks against which to test the saved model.'
)
parser.add_argument(
'--strict', action='store_true', help='Whether make the checking strict (raise errors) or not (raise warnings)'
)
    args = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
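# Example invocation (added sketch; the saved-model path is a placeholder, the
# flags are the ones defined above):
#     python utils/check_tf_ops.py --saved_model_path <model_dir>/saved_model.pb --opset 12 --strict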
| 705
|
'''simple docstring'''
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ['text', 'image', 'audio']
def create_inputs(input_types ):
    """simple docstring"""
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input" )
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png" ).resize((512, 512) ) )
        elif input_type == "audio":
            inputs.append(torch.ones(3000 ) )
        elif isinstance(input_type , list ):
            inputs.append(create_inputs(input_type ) )
        else:
            raise ValueError(F"""Invalid type requested: {input_type}""" )
    return inputs
def output_types(outputs ):
    """simple docstring"""
    output_types = []
    for output in outputs:
        if isinstance(output , (str, AgentText) ):
            output_types.append("text" )
        elif isinstance(output , (Image.Image, AgentImage) ):
            output_types.append("image" )
        elif isinstance(output , (torch.Tensor, AgentAudio) ):
            output_types.append("audio" )
        else:
            raise ValueError(F"""Invalid output: {output}""" )
    return output_types
@is_tool_test
class ToolTesterMixin :
def lowerCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
self.assertTrue(hasattr(self.tool , "inputs" ) )
self.assertTrue(hasattr(self.tool , "outputs" ) )
        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input , list ):
                for __input in _input:
                    self.assertTrue(__input in authorized_types )
            else:
                self.assertTrue(_input in authorized_types )
        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types )
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
        inputs = create_inputs(self.tool.inputs )
        outputs = self.tool(*inputs )
        # There is a single output
        if len(self.tool.outputs ) == 1:
            outputs = [outputs]
        self.assertListEqual(output_types(outputs ) , self.tool.outputs )
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
self.assertTrue(hasattr(self.tool , "description" ) )
self.assertTrue(hasattr(self.tool , "default_checkpoint" ) )
self.assertTrue(self.tool.description.startswith("This is a tool that" ) )
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
        inputs = create_inputs(self.tool.inputs )
        outputs = self.tool(*inputs )
        if not isinstance(outputs , list ):
            outputs = [outputs]
        self.assertEqual(len(outputs ) , len(self.tool.outputs ) )
        for output, output_type in zip(outputs , self.tool.outputs ):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output , agent_type ) )
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
        inputs = create_inputs(self.tool.inputs )
        _inputs = []
        for _input, input_type in zip(inputs , self.tool.inputs ):
            if isinstance(input_type , list ):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
        # Should not raise an error
        outputs = self.tool(*_inputs )
        if not isinstance(outputs , list ):
            outputs = [outputs]
        self.assertEqual(len(outputs ) , len(self.tool.outputs ) )
| 640
| 0
|
'''simple docstring'''
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
super().setUp()
# fmt: off
lowercase_ : int = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
lowercase_ : Tuple = dict(zip(a , range(len(a ) ) ) )
lowercase_ : Dict = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
lowercase_ : List[str] = {"unk_token": "<unk>"}
lowercase_ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowercase_ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(a ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(a ) )
    def get_tokenizer( self , **kwargs ):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map )
        return CLIPTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer( self , **kwargs ):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map )
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        '''simple docstring'''
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
def lowerCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
        tokenizer = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [1_0, 2, 1_6, 9, 3, 2, 1_6, 2_0]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
@require_ftfy
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text )
                text_tokenized_r = tokenizer_r.tokenize(text )
                self.assertListEqual(text_tokenized_s , text_tokenized_r )
                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                text_tokenized_s = tokenizer_s.tokenize(text )
                text_tokenized_r = tokenizer_r.tokenize(text )
                self.assertListEqual(text_tokenized_s , text_tokenized_r )
# Test that the tokenization is identical on unicode of space type
lowercase_ : str = [
"\u0009", # (horizontal tab, '\t')
"\u000B", # (vertical tab)
"\u000C", # (form feed)
"\u0020", # (space, ' ')
"\u200E", # (left-to-right mark):w
"\u200F", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq )
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq )
                    self.assertListEqual(text_tokenized_s , text_tokenized_r )
# Test that the tokenization is identical on unicode of line break type
lowercase_ : str = [
"\u000A", # (line feed, '\n')
"\r\n", # (carriage return and line feed, '\r\n')
"\u000D", # (carriage return, '\r')
"\r", # (carriage return, '\r')
"\u000D", # (carriage return, '\r')
"\u2028", # (line separator)
"\u2029", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq )
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq )
                    self.assertListEqual(text_tokenized_s , text_tokenized_r )
def lowerCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowercase_ : Tuple = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
lowercase_ : Union[str, Any] = f"""{text_of_1_token} {text_of_1_token}"""
lowercase_ : Dict = self.rust_tokenizer_class.from_pretrained(
a , use_fast=a , )
lowercase_ : Optional[Any] = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(a ) + 1, len(a ) + 1 + len(a )) , )
lowercase_ : Any = f""" {text}"""
lowercase_ : Optional[int] = self.rust_tokenizer_class.from_pretrained(
a , use_fast=a , )
lowercase_ : Tuple = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(a ) + 1, 1 + len(a ) + 1 + len(a )) , )
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
        with self.assertRaises(ValueError ) as context:
self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer" )
self.assertTrue(
context.exception.args[0].startswith(
"The `backend_tokenizer` provided does not match the expected format." ) )
@require_ftfy
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
super().test_tokenization_python_rust_equals()
def lowerCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
pass
| 706
|
'''simple docstring'''
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
UpperCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCamelCase__ = '\n Examples:\n ```py\n >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> negative_image_emb = out.negative_image_embeds\n\n >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")\n >>> pipe.to("cuda")\n\n >>> image = pipe(\n ... prompt,\n ... image_embeds=image_emb,\n ... negative_image_embeds=negative_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... ).images\n\n >>> image[0].save("cat.png")\n ```\n'
def get_new_h_w(h, w, scale_factor=8):
    """simple docstring"""
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
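# Hedged worked example for get_new_h_w above: the requested pixel resolution
# is rounded up to the next multiple of scale_factor**2 (64 by default) and
# mapped to the latent grid, so (768, 768) -> (96, 96) and (770, 768) -> (104, 96).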
class _UpperCAmelCase ( snake_case ):
def __init__( self : int , a : MultilingualCLIP , a : XLMRobertaTokenizer , a : UNetaDConditionModel , a : Union[DDIMScheduler, DDPMScheduler] , a : VQModel , ):
'''simple docstring'''
super().__init__()
self.register_modules(
text_encoder=a , tokenizer=a , unet=a , scheduler=a , movq=a , )
lowercase_ : Dict = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowerCAmelCase__ ( self : List[Any] , a : Tuple , a : List[str] , a : Optional[Any] , a : str , a : Tuple , a : List[str] ):
'''simple docstring'''
if latents is None:
lowercase_ : List[str] = randn_tensor(a , generator=a , device=a , dtype=a )
else:
if latents.shape != shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
lowercase_ : Optional[int] = latents.to(a )
lowercase_ : str = latents * scheduler.init_noise_sigma
return latents
def lowerCAmelCase__ ( self : Optional[Any] , a : List[str] , a : List[Any] , a : Union[str, Any] , a : str , a : Tuple=None , ):
'''simple docstring'''
lowercase_ : Tuple = len(a ) if isinstance(a , a ) else 1
# get prompt text embeddings
lowercase_ : Any = self.tokenizer(
a , padding="max_length" , truncation=a , max_length=7_7 , return_attention_mask=a , add_special_tokens=a , return_tensors="pt" , )
lowercase_ : Union[str, Any] = text_inputs.input_ids
lowercase_ : Tuple = self.tokenizer(a , padding="longest" , return_tensors="pt" ).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(a , a ):
lowercase_ : Optional[int] = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
lowercase_ : List[str] = text_input_ids.to(a )
lowercase_ : int = text_inputs.attention_mask.to(a )
lowercase_ , lowercase_ : Optional[int] = self.text_encoder(
input_ids=a , attention_mask=a )
lowercase_ : str = prompt_embeds.repeat_interleave(a , dim=0 )
lowercase_ : int = text_encoder_hidden_states.repeat_interleave(a , dim=0 )
lowercase_ : int = text_mask.repeat_interleave(a , dim=0 )
if do_classifier_free_guidance:
lowercase_ : List[str]
if negative_prompt is None:
lowercase_ : int = [""] * batch_size
elif type(a ) is not type(a ):
raise TypeError(
f"""`negative_prompt` should be the same type to `prompt`, but got {type(a )} !="""
f""" {type(a )}.""" )
elif isinstance(a , a ):
lowercase_ : Tuple = [negative_prompt]
elif batch_size != len(a ):
raise ValueError(
f"""`negative_prompt`: {negative_prompt} has batch size {len(a )}, but `prompt`:"""
f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
" the batch size of `prompt`." )
else:
lowercase_ : Dict = negative_prompt
lowercase_ : str = self.tokenizer(
a , padding="max_length" , max_length=7_7 , truncation=a , return_attention_mask=a , add_special_tokens=a , return_tensors="pt" , )
lowercase_ : List[Any] = uncond_input.input_ids.to(a )
lowercase_ : Optional[int] = uncond_input.attention_mask.to(a )
lowercase_ , lowercase_ : int = self.text_encoder(
input_ids=a , attention_mask=a )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowercase_ : List[str] = negative_prompt_embeds.shape[1]
lowercase_ : Dict = negative_prompt_embeds.repeat(1 , a )
lowercase_ : Optional[Any] = negative_prompt_embeds.view(batch_size * num_images_per_prompt , a )
lowercase_ : Any = uncond_text_encoder_hidden_states.shape[1]
lowercase_ : List[Any] = uncond_text_encoder_hidden_states.repeat(1 , a , 1 )
lowercase_ : Tuple = uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt , a , -1 )
lowercase_ : List[Any] = uncond_text_mask.repeat_interleave(a , dim=0 )
# done duplicates
            # For classifier-free guidance we would otherwise need two forward passes.
            # Instead, the unconditional and text embeddings are concatenated into a
            # single batch so that one forward pass covers both.
lowercase_ : Optional[int] = torch.cat([negative_prompt_embeds, prompt_embeds] )
lowercase_ : Tuple = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
lowercase_ : Any = torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
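    # Hedged shape note for the method above: under classifier-free guidance the
    # returned tensors are batched as [unconditional; conditional], i.e. their
    # leading dimension is 2 * batch_size * num_images_per_prompt.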
def lowerCAmelCase__ ( self : Tuple , a : Optional[Any]=0 ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowercase_ : List[str] = torch.device(f"""cuda:{gpu_id}""" )
lowercase_ : str = [
self.unet,
self.text_encoder,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(a , a )
def lowerCAmelCase__ ( self : Union[str, Any] , a : List[str]=0 ):
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
lowercase_ : List[str] = torch.device(f"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=a )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowercase_ : List[str] = None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
lowercase_ , lowercase_ : Optional[int] = cpu_offload_with_hook(a , a , prev_module_hook=a )
if self.safety_checker is not None:
lowercase_ , lowercase_ : Optional[int] = cpu_offload_with_hook(self.safety_checker , a , prev_module_hook=a )
# We'll offload the last model manually.
lowercase_ : Dict = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(a , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(a )
def __call__( self : Tuple , a : Union[str, List[str]] , a : Union[torch.FloatTensor, List[torch.FloatTensor]] , a : Union[torch.FloatTensor, List[torch.FloatTensor]] , a : Optional[Union[str, List[str]]] = None , a : int = 5_1_2 , a : int = 5_1_2 , a : int = 1_0_0 , a : float = 4.0 , a : int = 1 , a : Optional[Union[torch.Generator, List[torch.Generator]]] = None , a : Optional[torch.FloatTensor] = None , a : Optional[str] = "pil" , a : bool = True , ):
'''simple docstring'''
if isinstance(a , a ):
lowercase_ : List[str] = 1
elif isinstance(a , a ):
lowercase_ : int = len(a )
else:
raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(a )}""" )
lowercase_ : Tuple = self._execution_device
lowercase_ : Dict = batch_size * num_images_per_prompt
lowercase_ : Dict = guidance_scale > 1.0
lowercase_ , lowercase_ , lowercase_ : List[str] = self._encode_prompt(
a , a , a , a , a )
if isinstance(a , a ):
lowercase_ : Optional[int] = torch.cat(a , dim=0 )
if isinstance(a , a ):
lowercase_ : int = torch.cat(a , dim=0 )
if do_classifier_free_guidance:
lowercase_ : Optional[int] = image_embeds.repeat_interleave(a , dim=0 )
lowercase_ : int = negative_image_embeds.repeat_interleave(a , dim=0 )
lowercase_ : str = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(
dtype=prompt_embeds.dtype , device=a )
self.scheduler.set_timesteps(a , device=a )
lowercase_ : List[str] = self.scheduler.timesteps
lowercase_ : str = self.unet.config.in_channels
lowercase_ , lowercase_ : int = get_new_h_w(a , a , self.movq_scale_factor )
# create initial latent
lowercase_ : str = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , a , a , a , self.scheduler , )
for i, t in enumerate(self.progress_bar(a ) ):
# expand the latents if we are doing classifier free guidance
lowercase_ : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase_ : Optional[int] = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
lowercase_ : Optional[int] = self.unet(
sample=a , timestep=a , encoder_hidden_states=a , added_cond_kwargs=a , return_dict=a , )[0]
if do_classifier_free_guidance:
lowercase_ , lowercase_ : Union[str, Any] = noise_pred.split(latents.shape[1] , dim=1 )
lowercase_ , lowercase_ : Optional[Any] = noise_pred.chunk(2 )
lowercase_ , lowercase_ : Any = variance_pred.chunk(2 )
lowercase_ : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowercase_ : int = torch.cat([noise_pred, variance_pred_text] , dim=1 )
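                # Hedged note: the two lines above apply classifier-free guidance,
                # noise = n_uncond + guidance_scale * (n_text - n_uncond), then
                # re-attach the learned variance from the text branch for the
                # scheduler step.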
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowercase_ , lowercase_ : str = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowercase_ : Tuple = self.scheduler.step(
a , a , a , generator=a , ).prev_sample
# post-processing
lowercase_ : Union[str, Any] = self.movq.decode(a , force_not_quantize=a )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
lowercase_ : List[Any] = image * 0.5 + 0.5
lowercase_ : Optional[int] = image.clamp(0 , 1 )
lowercase_ : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowercase_ : List[str] = self.numpy_to_pil(a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a )
| 640
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCamelCase__ = {
'configuration_graphormer': ['GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GraphormerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'GraphormerForGraphClassification',
'GraphormerModel',
'GraphormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
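# Hedged sketch of the mechanism behind _LazyModule above, using plain PEP 562
# module-level __getattr__. It is left commented out because the real module is
# meant to be handled by _LazyModule, and the mapping below is a placeholder:
# import importlib
#
# _ATTR_TO_MODULE = {"GraphormerConfig": "configuration_graphormer"}
#
# def __getattr__(name):
#     if name not in _ATTR_TO_MODULE:
#         raise AttributeError(name)
#     submodule = importlib.import_module("." + _ATTR_TO_MODULE[name], __name__)
#     return getattr(submodule, name)  # imported only on first attribute access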
| 707
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
UpperCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCamelCase__ = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    """simple docstring"""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
def prepare_image(pil_image, w=512, h=512):
    """simple docstring"""
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
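# Hedged usage note for prepare_image above: a PIL image becomes a (1, 3, h, w)
# float32 tensor scaled to [-1, 1], which is what the MoVQ encoder expects,
# e.g. prepare_image(Image.open("frog.png"), 768, 768); the file name is
# illustrative only.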
class _UpperCAmelCase ( snake_case ):
def __init__( self : List[Any] , a : UNetaDConditionModel , a : DDPMScheduler , a : VQModel , ):
'''simple docstring'''
super().__init__()
self.register_modules(
unet=a , scheduler=a , movq=a , )
lowercase_ : Dict = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowerCAmelCase__ ( self : Union[str, Any] , a : Tuple , a : List[str] , a : List[Any] ):
'''simple docstring'''
lowercase_ : Dict = min(int(num_inference_steps * strength ) , a )
lowercase_ : str = max(num_inference_steps - init_timestep , 0 )
lowercase_ : Tuple = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
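    # Hedged worked example of the strength arithmetic above: with
    # num_inference_steps=100 and strength=0.3, init_timestep = min(30, 100) = 30
    # and t_start = 70, so denoising covers only the last 30 scheduler steps;
    # lower strength preserves more of the init image.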
def lowerCAmelCase__ ( self : Union[str, Any] , a : int , a : List[Any] , a : Tuple , a : Union[str, Any] , a : int , a : Tuple , a : Optional[Any]=None ):
'''simple docstring'''
if not isinstance(a , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(a )}""" )
lowercase_ : str = image.to(device=a , dtype=a )
lowercase_ : Any = batch_size * num_images_per_prompt
if image.shape[1] == 4:
lowercase_ : str = image
else:
if isinstance(a , a ) and len(a ) != batch_size:
raise ValueError(
f"""You have passed a list of generators of length {len(a )}, but requested an effective batch"""
f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
elif isinstance(a , a ):
lowercase_ : str = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(a )
]
lowercase_ : List[Any] = torch.cat(a , dim=0 )
else:
lowercase_ : Tuple = self.movq.encode(a ).latent_dist.sample(a )
lowercase_ : Union[str, Any] = self.movq.config.scaling_factor * init_latents
lowercase_ : Tuple = torch.cat([init_latents] , dim=0 )
lowercase_ : List[Any] = init_latents.shape
lowercase_ : Union[str, Any] = randn_tensor(a , generator=a , device=a , dtype=a )
# get latents
lowercase_ : Dict = self.scheduler.add_noise(a , a , a )
lowercase_ : Tuple = init_latents
return latents
def lowerCAmelCase__ ( self : List[Any] , a : str=0 ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowercase_ : Optional[Any] = torch.device(f"""cuda:{gpu_id}""" )
lowercase_ : Optional[Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(a , a )
def lowerCAmelCase__ ( self : Optional[int] , a : Union[str, Any]=0 ):
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
lowercase_ : Any = torch.device(f"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=a )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowercase_ : Optional[int] = None
for cpu_offloaded_model in [self.unet, self.movq]:
lowercase_ , lowercase_ : Union[str, Any] = cpu_offload_with_hook(a , a , prev_module_hook=a )
# We'll offload the last model manually.
lowercase_ : List[Any] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(a , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(a )
def __call__( self : Optional[int] , a : Union[torch.FloatTensor, List[torch.FloatTensor]] , a : Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] , a : Union[torch.FloatTensor, List[torch.FloatTensor]] , a : int = 5_1_2 , a : int = 5_1_2 , a : int = 1_0_0 , a : float = 4.0 , a : float = 0.3 , a : int = 1 , a : Optional[Union[torch.Generator, List[torch.Generator]]] = None , a : Optional[str] = "pil" , a : bool = True , ):
'''simple docstring'''
lowercase_ : Optional[int] = self._execution_device
lowercase_ : Dict = guidance_scale > 1.0
if isinstance(a , a ):
lowercase_ : Dict = torch.cat(a , dim=0 )
lowercase_ : Dict = image_embeds.shape[0]
if isinstance(a , a ):
lowercase_ : str = torch.cat(a , dim=0 )
if do_classifier_free_guidance:
lowercase_ : Optional[Any] = image_embeds.repeat_interleave(a , dim=0 )
lowercase_ : int = negative_image_embeds.repeat_interleave(a , dim=0 )
lowercase_ : int = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=a )
if not isinstance(a , a ):
lowercase_ : List[Any] = [image]
if not all(isinstance(a , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
f"""Input is in incorrect format: {[type(a ) for i in image]}. Currently, we only support PIL image and pytorch tensor""" )
lowercase_ : List[Any] = torch.cat([prepare_image(a , a , a ) for i in image] , dim=0 )
lowercase_ : List[Any] = image.to(dtype=image_embeds.dtype , device=a )
lowercase_ : Optional[int] = self.movq.encode(a )["latents"]
lowercase_ : Dict = latents.repeat_interleave(a , dim=0 )
self.scheduler.set_timesteps(a , device=a )
lowercase_ , lowercase_ : List[Any] = self.get_timesteps(a , a , a )
lowercase_ : Tuple = timesteps[:1].repeat(batch_size * num_images_per_prompt )
lowercase_ , lowercase_ : Optional[Any] = downscale_height_and_width(a , a , self.movq_scale_factor )
lowercase_ : Tuple = self.prepare_latents(
a , a , a , a , image_embeds.dtype , a , a )
for i, t in enumerate(self.progress_bar(a ) ):
# expand the latents if we are doing classifier free guidance
lowercase_ : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase_ : int = {"image_embeds": image_embeds}
lowercase_ : Optional[int] = self.unet(
sample=a , timestep=a , encoder_hidden_states=a , added_cond_kwargs=a , return_dict=a , )[0]
if do_classifier_free_guidance:
lowercase_ , lowercase_ : List[Any] = noise_pred.split(latents.shape[1] , dim=1 )
lowercase_ , lowercase_ : int = noise_pred.chunk(2 )
lowercase_ , lowercase_ : Any = variance_pred.chunk(2 )
lowercase_ : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowercase_ : Optional[Any] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowercase_ , lowercase_ : List[Any] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowercase_ : Dict = self.scheduler.step(
a , a , a , generator=a , )[0]
# post-processing
lowercase_ : Optional[Any] = self.movq.decode(a , force_not_quantize=a )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
lowercase_ : Tuple = image * 0.5 + 0.5
lowercase_ : Any = image.clamp(0 , 1 )
lowercase_ : Any = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowercase_ : Tuple = self.numpy_to_pil(a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a )
| 640
| 0
|
'''simple docstring'''
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
UpperCamelCase__ = {
'gwf-440k': {
'url': 'https://model-server.zqevans2.workers.dev/gwf-440k.ckpt',
'sample_rate': 48000,
'sample_size': 65536,
},
'jmann-small-190k': {
'url': 'https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt',
'sample_rate': 48000,
'sample_size': 65536,
},
'jmann-large-580k': {
'url': 'https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt',
'sample_rate': 48000,
'sample_size': 131072,
},
'maestro-uncond-150k': {
'url': 'https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt',
'sample_rate': 16000,
'sample_size': 65536,
},
'unlocked-uncond-250k': {
'url': 'https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt',
'sample_rate': 16000,
'sample_size': 65536,
},
'honk-140k': {
'url': 'https://model-server.zqevans2.workers.dev/honk-140k.ckpt',
'sample_rate': 16000,
'sample_size': 65536,
},
}
def alpha_sigma_to_t(alpha, sigma):
    """simple docstring"""
    return torch.atan2(sigma, alpha) / math.pi * 2
def get_crash_schedule(t):
    """simple docstring"""
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)
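# Hedged sanity check of the crash schedule above: at t=0, sigma=0 and alpha=1,
# so atan2(0, 1) / pi * 2 = 0; at t=1, sigma=1 and alpha=0, giving 1. The map
# keeps t within [0, 1] while reshaping the noise level in between.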
class _UpperCAmelCase ( snake_case ):
pass
class _UpperCAmelCase ( nn.Module ):
def __init__( self : Optional[int] , a : Any ):
'''simple docstring'''
super().__init__()
lowercase_ : Optional[Any] = DiffusionAttnUnetaD(a , n_attn_layers=4 )
lowercase_ : List[Any] = deepcopy(self.diffusion )
lowercase_ : int = torch.quasirandom.SobolEngine(1 , scramble=a )
def download(model_name):
    """simple docstring"""
    url = MODELS_MAP[model_name]["url"]
    os.system(F"""wget {url} ./""")
    return F"""./{model_name}.ckpt"""
UpperCamelCase__ = {
'1': 'resnets.0',
'2': 'attentions.0',
'3': 'resnets.1',
'4': 'attentions.1',
'5': 'resnets.2',
'6': 'attentions.2',
}
UpperCamelCase__ = {
'8': 'resnets.0',
'9': 'attentions.0',
'10': 'resnets.1',
'11': 'attentions.1',
'12': 'resnets.2',
'13': 'attentions.2',
}
UpperCamelCase__ = {
'1': 'resnets.0',
'2': 'attentions.0',
'3': 'resnets.1',
'4': 'attentions.1',
'5': 'resnets.2',
'6': 'attentions.2',
'8': 'resnets.3',
'9': 'attentions.3',
'10': 'resnets.4',
'11': 'attentions.4',
'12': 'resnets.5',
'13': 'attentions.5',
}
UpperCamelCase__ = {
'0': 'resnets.0',
'1': 'resnets.1',
'2': 'resnets.2',
'4': 'resnets.0',
'5': 'resnets.1',
'6': 'resnets.2',
}
UpperCamelCase__ = {
'skip': 'conv_skip',
'main.0': 'conv_1',
'main.1': 'group_norm_1',
'main.3': 'conv_2',
'main.4': 'group_norm_2',
}
UpperCamelCase__ = {
'norm': 'group_norm',
'qkv_proj': ['query', 'key', 'value'],
'out_proj': ['proj_attn'],
}
def convert_resconv_naming(name):
    """simple docstring"""
    if name.startswith("skip"):
        return name.replace("skip", RES_CONV_MAP["skip"])
    # name has to be of format main.{digit}
    if not name.startswith("main."):
        raise ValueError(F"""ResConvBlock error with {name}""")
    return name.replace(name[:6], RES_CONV_MAP[name[:6]])
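# Hedged example of the mapping above: "main.0.weight" -> "conv_1.weight" and
# "skip.weight" -> "conv_skip.weight", per RES_CONV_MAP.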
def convert_attn_naming(name):
    """simple docstring"""
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(F"""Attn error with {name}""")
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase=13 ):
"""simple docstring"""
lowercase_ : List[Any] = input_string
if string.split("." )[0] == "timestep_embed":
return string.replace("timestep_embed" , "time_proj" )
lowercase_ : Optional[int] = 0
if string.startswith("net.3." ):
depth += 1
lowercase_ : int = string[6:]
elif string.startswith("net." ):
lowercase_ : Optional[Any] = string[4:]
while string.startswith("main.7." ):
depth += 1
lowercase_ : Optional[int] = string[7:]
if string.startswith("main." ):
lowercase_ : int = string[5:]
# mid block
if string[:2].isdigit():
lowercase_ : Any = string[:2]
lowercase_ : Dict = string[2:]
else:
lowercase_ : Optional[Any] = string[0]
lowercase_ : List[str] = string[1:]
if depth == max_depth:
lowercase_ : List[str] = MID_NUM_TO_LAYER[layer_num]
lowercase_ : Any = "mid_block"
elif depth > 0 and int(_UpperCamelCase ) < 7:
lowercase_ : Optional[int] = DOWN_NUM_TO_LAYER[layer_num]
lowercase_ : List[str] = F"""down_blocks.{depth}"""
elif depth > 0 and int(_UpperCamelCase ) > 7:
lowercase_ : str = UP_NUM_TO_LAYER[layer_num]
lowercase_ : Any = F"""up_blocks.{max_depth - depth - 1}"""
elif depth == 0:
lowercase_ : int = DEPTH_0_TO_LAYER[layer_num]
lowercase_ : Optional[Any] = F"""up_blocks.{max_depth - 1}""" if int(_UpperCamelCase ) > 3 else "down_blocks.0"
if not string_left.startswith("." ):
raise ValueError(F"""Naming error with {input_string} and string_left: {string_left}.""" )
lowercase_ : Tuple = string_left[1:]
if "resnets" in new_layer:
lowercase_ : Tuple = convert_resconv_naming(_UpperCamelCase )
elif "attentions" in new_layer:
lowercase_ : Union[str, Any] = convert_attn_naming(_UpperCamelCase )
lowercase_ : Tuple = new_string_left
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
lowercase_ : Optional[int] = prefix + "." + new_layer + "." + string_left
else:
lowercase_ : Any = [prefix + "." + new_layer + "." + s for s in string_left]
return new_string
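# Hedged example of the simplest branch of the rename above: any key whose
# first dotted component is "timestep_embed" is renamed wholesale, e.g.
# "timestep_embed.weight" -> "time_proj.weight". A "qkv_proj" key instead
# yields a list of three names (query/key/value), handled below.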
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Any = {}
for k, v in state_dict.items():
if k.endswith("kernel" ):
            # up- and downsample layers don't have trainable weights
continue
lowercase_ : List[str] = rename(_UpperCamelCase )
# check if we need to transform from Conv => Linear for attention
if isinstance(_UpperCamelCase , _UpperCamelCase ):
lowercase_ : List[str] = transform_conv_attns(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
else:
lowercase_ : Tuple = v
return new_state_dict
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
if len(_UpperCamelCase ) == 1:
if len(v.shape ) == 3:
# weight
lowercase_ : List[Any] = v[:, :, 0]
else:
# bias
lowercase_ : Any = v
else:
# qkv matrices
lowercase_ : str = v.shape[0]
lowercase_ : Optional[int] = trippled_shape // 3
for i in range(3 ):
if len(v.shape ) == 3:
lowercase_ : Any = v[i * single_shape : (i + 1) * single_shape, :, 0]
else:
lowercase_ : List[str] = v[i * single_shape : (i + 1) * single_shape]
return new_state_dict
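# Hedged note on the qkv branch above: a fused (3*C, C, 1) convolution kernel
# is sliced into three (C, C) weights for query/key/value; biases are sliced
# the same way along their single dimension.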
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Optional[int] = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
lowercase_ : List[Any] = args.model_path.split("/" )[-1].split("." )[0]
if not os.path.isfile(args.model_path ):
assert (
model_name == args.model_path
), F"""Make sure to provide one of the official model names {MODELS_MAP.keys()}"""
lowercase_ : Optional[Any] = download(_UpperCamelCase )
lowercase_ : Any = MODELS_MAP[model_name]["sample_rate"]
lowercase_ : List[str] = MODELS_MAP[model_name]["sample_size"]
lowercase_ : str = Object()
lowercase_ : List[str] = sample_size
lowercase_ : List[Any] = sample_rate
lowercase_ : Any = 0
lowercase_ : Any = UNetaDModel(sample_size=_UpperCamelCase , sample_rate=_UpperCamelCase )
lowercase_ : Any = diffusers_model.state_dict()
lowercase_ : int = DiffusionUncond(_UpperCamelCase )
orig_model.load_state_dict(torch.load(args.model_path , map_location=_UpperCamelCase )["state_dict"] )
lowercase_ : Dict = orig_model.diffusion_ema.eval()
lowercase_ : Dict = orig_model.state_dict()
lowercase_ : str = rename_orig_weights(_UpperCamelCase )
lowercase_ : int = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
lowercase_ : int = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
assert len(_UpperCamelCase ) == 0, F"""Problem with {renamed_minus_diffusers}"""
assert all(k.endswith("kernel" ) for k in list(_UpperCamelCase ) ), F"""Problem with {diffusers_minus_renamed}"""
for key, value in renamed_state_dict.items():
assert (
diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
), F"""Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"""
if key == "time_proj.weight":
lowercase_ : int = value.squeeze()
lowercase_ : List[Any] = value
diffusers_model.load_state_dict(_UpperCamelCase )
lowercase_ : int = 100
lowercase_ : List[Any] = 33
lowercase_ : Union[str, Any] = IPNDMScheduler(num_train_timesteps=_UpperCamelCase )
lowercase_ : List[Any] = torch.manual_seed(_UpperCamelCase )
lowercase_ : Any = torch.randn([1, 2, config.sample_size] , generator=_UpperCamelCase ).to(_UpperCamelCase )
lowercase_ : int = torch.linspace(1 , 0 , steps + 1 , device=_UpperCamelCase )[:-1]
lowercase_ : int = get_crash_schedule(_UpperCamelCase )
lowercase_ : str = DanceDiffusionPipeline(unet=_UpperCamelCase , scheduler=_UpperCamelCase )
lowercase_ : Optional[Any] = torch.manual_seed(33 )
lowercase_ : List[Any] = pipe(num_inference_steps=_UpperCamelCase , generator=_UpperCamelCase ).audios
lowercase_ : int = sampling.iplms_sample(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , {} )
lowercase_ : Any = generated.clamp(-1 , 1 )
lowercase_ : Tuple = (generated - audio).abs().sum()
lowercase_ : Tuple = (generated - audio).abs().max()
if args.save:
pipe.save_pretrained(args.checkpoint_path )
print("Diff sum" , _UpperCamelCase )
print("Diff max" , _UpperCamelCase )
assert diff_max < 1e-3, F"""Diff max: {diff_max} is too much :-/"""
print(F"""Conversion for {model_name} successful!""" )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
parser.add_argument(
'--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
UpperCamelCase__ = parser.parse_args()
main(args)
| 708
|
'''simple docstring'''
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: str = ['image_processor', 'tokenizer']
__lowerCamelCase: Dict = 'Pix2StructImageProcessor'
__lowerCamelCase: Union[str, Any] = ('T5Tokenizer', 'T5TokenizerFast')
def __init__( self : str , a : Dict , a : List[str] ):
'''simple docstring'''
lowercase_ : Optional[Any] = False
super().__init__(a , a )
def __call__( self : Tuple , a : int=None , a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , a : bool = True , a : Union[bool, str, PaddingStrategy] = False , a : Union[bool, str, TruncationStrategy] = None , a : Optional[int] = None , a : Optional[int] = 2_0_4_8 , a : int = 0 , a : Optional[int] = None , a : Optional[bool] = None , a : bool = False , a : bool = False , a : bool = False , a : bool = False , a : bool = False , a : bool = True , a : Optional[Union[str, TensorType]] = None , **a : List[str] , ):
'''simple docstring'''
if images is None and text is None:
raise ValueError("You have to specify either images or text." )
# Get only text
if images is None and not self.image_processor.is_vqa:
lowercase_ : Dict = self.tokenizer
lowercase_ : Tuple = self.tokenizer(
text=a , add_special_tokens=a , padding=a , truncation=a , max_length=a , stride=a , pad_to_multiple_of=a , return_attention_mask=a , return_overflowing_tokens=a , return_special_tokens_mask=a , return_offsets_mapping=a , return_token_type_ids=a , return_length=a , verbose=a , return_tensors=a , **a , )
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
lowercase_ : Optional[int] = self.image_processor(
a , return_tensors=a , max_patches=a , **a )
else:
# add pixel_values and bbox
lowercase_ : Any = self.image_processor(
a , return_tensors=a , max_patches=a , header_text=a , **a )
if text is not None and not self.image_processor.is_vqa:
lowercase_ : int = self.tokenizer(
text=a , add_special_tokens=a , padding=a , truncation=a , max_length=a , stride=a , pad_to_multiple_of=a , return_attention_mask=a , return_overflowing_tokens=a , return_special_tokens_mask=a , return_offsets_mapping=a , return_token_type_ids=a , return_length=a , verbose=a , return_tensors=a , **a , )
if "attention_mask" in text_encoding:
lowercase_ : str = text_encoding.pop("attention_mask" )
if "input_ids" in text_encoding:
lowercase_ : Dict = text_encoding.pop("input_ids" )
else:
lowercase_ : str = None
if text_encoding is not None:
encoding_image_processor.update(a )
return encoding_image_processor
def lowerCAmelCase__ ( self : Any , *a : str , **a : Tuple ):
'''simple docstring'''
return self.tokenizer.batch_decode(*a , **a )
def lowerCAmelCase__ ( self : str , *a : Optional[int] , **a : Any ):
'''simple docstring'''
return self.tokenizer.decode(*a , **a )
@property
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
lowercase_ : Tuple = self.tokenizer.model_input_names
lowercase_ : Dict = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
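# Hedged usage sketch for the processor above; the checkpoint name is an
# assumption and the blank image stands in for a real input.
def _demo_pix2struct_processor():
    from PIL import Image

    from transformers import Pix2StructProcessor

    processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base")
    inputs = processor(images=Image.new("RGB", (640, 480)), text="A caption", return_tensors="pt")
    # image features arrive as "flattened_patches"; the tokenizer outputs are
    # renamed to "decoder_input_ids" / "decoder_attention_mask" as done above.
    return inputs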
| 640
| 0
|
'''simple docstring'''
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    """simple docstring"""
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
def rename_key(key):
    """simple docstring"""
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(R"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(R"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(R"norm1", "layer_norm1", key)
    if "norm2" in key:
        key = re.sub(R"norm2", "layer_norm2", key)
    if "encoder.norm" in key:
        key = re.sub(R"encoder.norm", "post_layernorm", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(R"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
    if "encoder.pos_embed" in key:
        key = re.sub(R"encoder.pos_embed", "embeddings.position_embedding", key)
    if "encoder.cls_token" in key:
        key = re.sub(R"encoder.cls_token", "embeddings.class_embedding", key)
    if "self_attn" in key:
        key = re.sub(R"self_attn.proj", "self_attn.projection", key)
    return key
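# Hedged example of the key renaming above: "visual_encoder.blocks.0.attn.qkv.weight"
# becomes "vision_model.encoder.layers.0.self_attn.qkv.weight" after the
# visual_encoder / blocks / attn substitutions.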
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase=None ):
"""simple docstring"""
if config_path is not None:
lowercase_ : List[Any] = BlipConfig.from_pretrained(_UpperCamelCase )
else:
lowercase_ : Optional[int] = BlipConfig(projection_dim=512 , text_config={} , vision_config={} )
lowercase_ : List[str] = BlipForConditionalGeneration(_UpperCamelCase ).eval()
lowercase_ : Union[str, Any] = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"
lowercase_ : Tuple = blip_decoder(pretrained=_UpperCamelCase , image_size=384 , vit="base" )
lowercase_ : List[str] = pt_model.eval()
lowercase_ : List[str] = pt_model.state_dict()
for key in modified_state_dict.copy():
lowercase_ : Dict = modified_state_dict.pop(_UpperCamelCase )
lowercase_ : Tuple = rename_key(_UpperCamelCase )
lowercase_ : List[str] = value
hf_model.load_state_dict(_UpperCamelCase )
lowercase_ : Tuple = 384
lowercase_ : str = load_demo_image(image_size=_UpperCamelCase , device="cpu" )
lowercase_ : Any = BertTokenizer.from_pretrained("bert-base-uncased" )
lowercase_ : Dict = tokenizer(["a picture of"] ).input_ids
lowercase_ : Optional[Any] = hf_model.generate(_UpperCamelCase , _UpperCamelCase )
assert out[0].tolist() == [3_0522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
lowercase_ : int = hf_model.generate(_UpperCamelCase )
assert out[0].tolist() == [3_0522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(_UpperCamelCase )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
lowercase_ : int = (
"https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
)
lowercase_ : str = blip_vqa(pretrained=_UpperCamelCase , image_size=_UpperCamelCase , vit="base" )
vqa_model.eval()
lowercase_ : List[str] = vqa_model.state_dict()
for key in modified_state_dict.copy():
lowercase_ : str = modified_state_dict.pop(_UpperCamelCase )
lowercase_ : int = rename_key(_UpperCamelCase )
lowercase_ : Any = value
lowercase_ : List[Any] = BlipForQuestionAnswering(_UpperCamelCase )
hf_vqa_model.load_state_dict(_UpperCamelCase )
lowercase_ : str = ["How many dogs are in this image?"]
lowercase_ : Tuple = tokenizer(_UpperCamelCase , return_tensors="pt" ).input_ids
lowercase_ : Dict = hf_vqa_model.generate(_UpperCamelCase , _UpperCamelCase )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa" )
lowercase_ : List[str] = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"
lowercase_ : int = blip_itm(pretrained=_UpperCamelCase , image_size=_UpperCamelCase , vit="base" )
itm_model.eval()
lowercase_ : Any = itm_model.state_dict()
for key in modified_state_dict.copy():
lowercase_ : str = modified_state_dict.pop(_UpperCamelCase )
lowercase_ : Union[str, Any] = rename_key(_UpperCamelCase )
lowercase_ : Union[str, Any] = value
lowercase_ : List[Any] = BlipForImageTextRetrieval(_UpperCamelCase )
lowercase_ : Dict = ["A picture of a woman with a dog sitting in a beach"]
lowercase_ : int = tokenizer(
_UpperCamelCase , return_tensors="pt" , padding="max_length" , truncation=_UpperCamelCase , max_length=35 , ).input_ids
hf_itm_model.load_state_dict(_UpperCamelCase )
hf_itm_model.eval()
lowercase_ : int = hf_itm_model(_UpperCamelCase , _UpperCamelCase , use_itm_head=_UpperCamelCase )
lowercase_ : Optional[Any] = hf_itm_model(_UpperCamelCase , _UpperCamelCase , use_itm_head=_UpperCamelCase )
assert out[0].item() == 0.2110687494277954
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.45698845386505127
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm" )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
UpperCamelCase__ = parser.parse_args()
    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
| 709
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _UpperCAmelCase ( snake_case , unittest.TestCase ):
__lowerCamelCase: Dict = KandinskyVaaPriorPipeline
__lowerCamelCase: Optional[int] = ['prompt']
__lowerCamelCase: Any = ['prompt', 'negative_prompt']
__lowerCamelCase: List[Any] = [
'num_images_per_prompt',
'generator',
'num_inference_steps',
'latents',
'negative_prompt',
'guidance_scale',
'output_type',
'return_dict',
]
__lowerCamelCase: List[Any] = False
@property
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
return 3_2
@property
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
return 3_2
@property
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
return self.time_input_dim
@property
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def lowerCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
return 1_0_0
@property
def lowerCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
lowercase_ : Any = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
return tokenizer
@property
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModelWithProjection(a )
@property
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ : List[str] = {
"num_attention_heads": 2,
"attention_head_dim": 1_2,
"embedding_dim": self.text_embedder_hidden_size,
"num_layers": 1,
}
lowercase_ : Union[str, Any] = PriorTransformer(**a )
        # clip_std and clip_mean are initialized to 0, so PriorTransformer.post_process_latents would always return 0 - set clip_std to 1 so it won't return 0
lowercase_ : List[Any] = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ : Dict = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=2_2_4 , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1_4 , )
lowercase_ : Optional[Any] = CLIPVisionModelWithProjection(a )
return model
@property
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
lowercase_ : List[str] = CLIPImageProcessor(
crop_size=2_2_4 , do_center_crop=a , do_normalize=a , do_resize=a , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=2_2_4 , )
return image_processor
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
lowercase_ : Any = self.dummy_prior
lowercase_ : Optional[Any] = self.dummy_image_encoder
lowercase_ : List[Any] = self.dummy_text_encoder
lowercase_ : Any = self.dummy_tokenizer
lowercase_ : Optional[Any] = self.dummy_image_processor
lowercase_ : List[str] = UnCLIPScheduler(
variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1_0_0_0 , clip_sample=a , clip_sample_range=10.0 , )
lowercase_ : List[Any] = {
"prior": prior,
"image_encoder": image_encoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"scheduler": scheduler,
"image_processor": image_processor,
}
return components
def lowerCAmelCase__ ( self : Any , a : Dict , a : Dict=0 ):
'''simple docstring'''
if str(a ).startswith("mps" ):
lowercase_ : int = torch.manual_seed(a )
else:
lowercase_ : Optional[Any] = torch.Generator(device=a ).manual_seed(a )
lowercase_ : Any = {
"prompt": "horse",
"generator": generator,
"guidance_scale": 4.0,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
def lowerCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ : str = "cpu"
lowercase_ : Any = self.get_dummy_components()
lowercase_ : int = self.pipeline_class(**a )
lowercase_ : Any = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowercase_ : Any = pipe(**self.get_dummy_inputs(a ) )
lowercase_ : List[Any] = output.image_embeds
lowercase_ : str = pipe(
**self.get_dummy_inputs(a ) , return_dict=a , )[0]
lowercase_ : Any = image[0, -1_0:]
lowercase_ : Dict = image_from_tuple[0, -1_0:]
assert image.shape == (1, 3_2)
lowercase_ : int = np.array(
[-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ : int = torch_device == "cpu"
lowercase_ : Tuple = True
lowercase_ : str = False
self._test_inference_batch_single_identical(
test_max_difference=a , relax_max_difference=a , test_mean_pixel_difference=a , )
@skip_mps
def lowerCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ : Any = torch_device == "cpu"
lowercase_ : int = False
self._test_attention_slicing_forward_pass(
test_max_difference=a , test_mean_pixel_difference=a , )
| 640
| 0
|
'''simple docstring'''
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
UpperCamelCase__ = datasets.utils.logging.get_logger(__name__)
@dataclass
class _UpperCAmelCase ( datasets.BuilderConfig ):
__lowerCamelCase: Optional[datasets.Features] = None
__lowerCamelCase: str = "utf-8"
__lowerCamelCase: Optional[str] = None
__lowerCamelCase: Optional[str] = None
__lowerCamelCase: bool = True # deprecated
__lowerCamelCase: Optional[int] = None # deprecated
__lowerCamelCase: int = 10 << 20 # 10MB
__lowerCamelCase: Optional[bool] = None
class _UpperCAmelCase ( datasets.ArrowBasedBuilder ):
__lowerCamelCase: int = JsonConfig
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
if self.config.block_size is not None:
logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead" )
lowercase_ : Dict = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
"The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore." )
if self.config.newlines_in_values is not None:
raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported" )
return datasets.DatasetInfo(features=self.config.features )
def lowerCAmelCase__ ( self : List[Any] , a : Optional[Any] ):
'''simple docstring'''
if not self.config.data_files:
raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
lowercase_ : List[str] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(a , (str, list, tuple) ):
lowercase_ : int = data_files
if isinstance(a , a ):
lowercase_ : str = [files]
lowercase_ : Optional[Any] = [dl_manager.iter_files(a ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
lowercase_ : Optional[int] = []
for split_name, files in data_files.items():
if isinstance(a , a ):
lowercase_ : Dict = [files]
lowercase_ : Any = [dl_manager.iter_files(a ) for file in files]
splits.append(datasets.SplitGenerator(name=a , gen_kwargs={"files": files} ) )
return splits
def lowerCAmelCase__ ( self : Optional[int] , a : pa.Table ):
'''simple docstring'''
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
lowercase_ : List[Any] = self.config.features.arrow_schema.field(a ).type
lowercase_ : List[Any] = pa_table.append_column(a , pa.array([None] * len(a ) , type=a ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
lowercase_ : str = table_cast(a , self.config.features.arrow_schema )
return pa_table
def lowerCAmelCase__ ( self : Optional[Any] , a : Optional[Any] ):
'''simple docstring'''
for file_idx, file in enumerate(itertools.chain.from_iterable(a ) ):
            # If the file is a single JSON object and we only need the list of items in one specific field
if self.config.field is not None:
with open(a , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
lowercase_ : List[Any] = json.load(a )
# We keep only the field we are interested in
lowercase_ : Any = dataset[self.config.field]
                    # We accept two formats: a list of dicts or a dict of lists
if isinstance(a , (list, tuple) ):
lowercase_ : int = set().union(*[row.keys() for row in dataset] )
lowercase_ : str = {col: [row.get(a ) for row in dataset] for col in keys}
else:
lowercase_ : List[Any] = dataset
lowercase_ : Optional[Any] = pa.Table.from_pydict(a )
yield file_idx, self._cast_table(a )
# If the file has one json object per line
else:
with open(a , "rb" ) as f:
lowercase_ : Optional[int] = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
lowercase_ : str = max(self.config.chunksize // 3_2 , 1_6 << 1_0 )
lowercase_ : Optional[Any] = (
self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
)
while True:
lowercase_ : Optional[int] = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(a )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
lowercase_ : int = batch.decode(self.config.encoding , errors=a ).encode("utf-8" )
try:
while True:
try:
lowercase_ : Union[str, Any] = paj.read_json(
io.BytesIO(a ) , read_options=paj.ReadOptions(block_size=a ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(a , pa.ArrowInvalid )
and "straddling" not in str(a )
or block_size > len(a )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
f"""Batch of {len(a )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""" )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
a , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
lowercase_ : int = json.load(a )
except json.JSONDecodeError:
logger.error(f"""Failed to read file '{file}' with error {type(a )}: {e}""" )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(a , a ): # list is the only sequence type supported in JSON
try:
lowercase_ : List[str] = set().union(*[row.keys() for row in dataset] )
lowercase_ : Optional[int] = {col: [row.get(a ) for row in dataset] for col in keys}
lowercase_ : int = pa.Table.from_pydict(a )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(f"""Failed to read file '{file}' with error {type(a )}: {e}""" )
raise ValueError(f"""Not able to read records in the JSON file at {file}.""" ) from None
yield file_idx, self._cast_table(a )
break
else:
logger.error(f"""Failed to read file '{file}' with error {type(a )}: {e}""" )
raise ValueError(
f"""Not able to read records in the JSON file at {file}. """
f"""You should probably indicate the field of the JSON file containing your records. """
f"""This JSON file contain the following fields: {str(list(dataset.keys() ) )}. """
f"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """ ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(a )
batch_idx += 1
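# Hedged, simplified sketch of the block-size retry loop above (the helper name
# `_read_json_bytes` is ours): pyarrow's JSON reader can fail when a record
# straddles a block boundary, so the block size is doubled until a full record
# fits, mirroring the builder's retry logic.
def _read_json_bytes(batch: bytes, block_size: int = 16 << 10) -> pa.Table:
    while True:
        try:
            return paj.read_json(io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size))
        except pa.ArrowInvalid:
            if block_size > len(batch):
                raise
            block_size *= 2  # retry with a larger block before giving up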
| 710
|
'''simple docstring'''
from __future__ import annotations
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """simple docstring"""
    result = []
    arr_size = len(arr)
    for i in range(arr_size):
        next_item: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_item = arr[j]
                break
        result.append(next_item)
    return result
def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """simple docstring"""
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result
def next_greatest_element(arr: list[float]) -> list[float]:
    """simple docstring"""
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size
    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result
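# Hedged worked example for the stack-based version above: for
# [2, 7, 3, 5, 4, 6, 8] the next greater elements are [7, 8, 5, 6, 6, 8, -1];
# the monotonic stack does O(n) total work versus the O(n^2) double loops in
# the two functions above.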
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
    setup = (
        'from __main__ import arr, next_greatest_element_slow, '
        'next_greatest_element_fast, next_greatest_element'
    )
print(
'next_greatest_element_slow():',
timeit('next_greatest_element_slow(arr)', setup=setup),
)
print(
'next_greatest_element_fast():',
timeit('next_greatest_element_fast(arr)', setup=setup),
)
print(
' next_greatest_element():',
timeit('next_greatest_element(arr)', setup=setup),
)
| 640
| 0
|
'''simple docstring'''
import numpy as np
def __SCREAMING_SNAKE_CASE(f, ya, xa, x_end, h):
    """simple docstring"""
    n = int(np.ceil((x_end - xa) / h))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa
    for k in range(n):
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
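# --- Illustrative rewrite (not part of the original file): a self-contained
# classical Runge-Kutta (RK4) integrator with the four slopes named k1..k4,
# which the transformed code above collapses into a single name. Assumes
# f(x, y) is the right-hand side of the ODE y' = f(x, y).
import numpy as np

def runge_kutta4(f, y0: float, x0: float, h: float, x_end: float) -> np.ndarray:
    n = int(np.ceil((x_end - x0) / h))
    y = np.zeros(n + 1)
    y[0], x = y0, x0
    for k in range(n):
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (h / 6) * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h
    return y

# dy/dx = y with y(0) = 1 approximates e^x, so y(1) should be close to e.
print(runge_kutta4(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)[-1])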
| 711
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json',
}
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: List[Any] = 'gpt_neox_japanese'
def __init__( self : List[str] , a : List[Any]=3_2_0_0_0 , a : Union[str, Any]=2_5_6_0 , a : Optional[Any]=3_2 , a : Any=3_2 , a : str=4 , a : Optional[int]="gelu" , a : Optional[Any]=1.00 , a : Dict=1_0_0_0_0 , a : List[Any]=2_0_4_8 , a : Dict=0.02 , a : int=1e-5 , a : Optional[int]=True , a : Union[str, Any]=3_1_9_9_6 , a : List[Any]=3_1_9_9_9 , a : List[str]=0.1 , a : Dict=0.0 , **a : Union[str, Any] , ):
'''simple docstring'''
super().__init__(bos_token_id=a , eos_token_id=a , **a )
lowercase_ : int = vocab_size
lowercase_ : int = max_position_embeddings
lowercase_ : List[str] = hidden_size
lowercase_ : Any = num_hidden_layers
lowercase_ : Any = num_attention_heads
lowercase_ : List[Any] = intermediate_multiple_size
lowercase_ : List[str] = hidden_act
lowercase_ : Optional[int] = rotary_pct
lowercase_ : Tuple = rotary_emb_base
lowercase_ : Optional[Any] = initializer_range
lowercase_ : List[str] = layer_norm_eps
lowercase_ : List[str] = use_cache
lowercase_ : Any = attention_dropout
lowercase_ : List[Any] = hidden_dropout
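# --- Illustrative usage (not part of the original file): instantiating the
# configuration class above through the public transformers API; the keyword
# names mirror the __init__ signature shown above.
from transformers import GPTNeoXJapaneseConfig

config = GPTNeoXJapaneseConfig(hidden_size=2560, num_hidden_layers=32)
print(config.hidden_act, config.rotary_emb_base)  # gelu 10000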
| 640
| 0
|
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Any = os.path.join(args.tf_model_dir , "parameters.json" )
lowercase_ : str = json.loads(open(_UpperCamelCase ).read() )
if not params:
raise ValueError(
F"""It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.""" )
if not args.output.endswith(".pt" ):
lowercase_ : str = args.output + ".pt"
lowercase_ : Any = OrderedDict()
with tf.device("/CPU:0" ):
lowercase_ : str = tf.train.load_checkpoint(args.tf_model_dir )
lowercase_ : Any = reader.get_variable_to_shape_map()
for key_name in shapes.keys():
lowercase_ : Optional[Any] = reader.get_tensor(_UpperCamelCase ).astype(np.floataa )
if key_name.endswith("/adam_m" ) or key_name.endswith("/adam_v" ):
continue
if key_name.startswith("pasts/" ):
if key_name.startswith("pasts/mlp" ):
lowercase_ : Optional[int] = int(key_name[9] )
elif key_name.startswith("pasts/out" ):
lowercase_ : Tuple = 8
lowercase_ : str = "model.sqout.%d.weight" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
lowercase_ : Optional[int] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase_ : Optional[int] = torch.tensor(_UpperCamelCase )
elif key_name.startswith("model/moe" ):
lowercase_ : Dict = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/switch_gating/kernel" ):
lowercase_ : Optional[Any] = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
lowercase_ : List[str] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase_ : Tuple = torch.tensor(_UpperCamelCase )
elif key_name.endswith("/softmlp/kernel" ):
lowercase_ : str = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
lowercase_ : str = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase_ : Any = torch.tensor(_UpperCamelCase )
elif key_name.endswith("/wo/kernel" ) or key_name.endswith("/wi/kernel" ):
lowercase_ : List[str] = key_name[-9:-7]
for i in range(16 ):
lowercase_ : Optional[int] = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
lowercase_ : Optional[Any] = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
lowercase_ : Optional[int] = torch.tensor(_UpperCamelCase )
elif key_name.startswith("model/mlp" ):
lowercase_ : Dict = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/p1/kernel" ):
lowercase_ : List[Any] = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
lowercase_ : Tuple = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase_ : Tuple = torch.tensor(_UpperCamelCase )
elif key_name.endswith("/p1/bias" ):
lowercase_ : Optional[int] = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
lowercase_ : Tuple = vnp.copy() # same because it is one dimensional
lowercase_ : List[str] = torch.tensor(_UpperCamelCase )
elif key_name.endswith("/p2/kernel" ):
lowercase_ : Optional[int] = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
lowercase_ : List[str] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase_ : Tuple = torch.tensor(_UpperCamelCase )
elif key_name.endswith("/p2/bias" ):
lowercase_ : Tuple = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
lowercase_ : Dict = vnp.copy() # same because it is one dimensional
lowercase_ : Dict = torch.tensor(_UpperCamelCase )
elif key_name.startswith("model/ln" ):
lowercase_ : Tuple = int(key_name[8:].split("/" )[0] )
if key_name.endswith("/b" ):
lowercase_ : Dict = "model.blocks.%d.feed_forward.norm.bias" % player
lowercase_ : Optional[int] = vnp.copy() # same because it is one dimensional
lowercase_ : Union[str, Any] = torch.tensor(_UpperCamelCase )
elif key_name.endswith("/g" ):
lowercase_ : List[str] = "model.blocks.%d.feed_forward.norm.weight" % player
lowercase_ : Dict = vnp.copy() # same because it is one dimensional
lowercase_ : List[Any] = torch.tensor(_UpperCamelCase )
elif key_name.startswith("model/att" ):
lowercase_ : List[str] = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/qkv/kernel" ):
lowercase_ : Optional[Any] = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
lowercase_ : Union[str, Any] = state[:, 0, :, :]
lowercase_ : int = state[:, 1, :, :]
lowercase_ : Any = state[:, 2, :, :]
lowercase_ : List[Any] = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase_ : Dict = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase_ : Union[str, Any] = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase_ : Union[str, Any] = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
lowercase_ : Dict = torch.tensor(_UpperCamelCase )
lowercase_ : Any = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
lowercase_ : Optional[int] = torch.tensor(_UpperCamelCase )
lowercase_ : Tuple = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
lowercase_ : int = torch.tensor(_UpperCamelCase )
elif key_name.endswith("/o/kernel" ):
lowercase_ : List[str] = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
lowercase_ : List[Any] = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase_ : Optional[int] = torch.tensor(_UpperCamelCase )
elif key_name.startswith("model/an" ):
lowercase_ : str = int(key_name[8:].split("/" )[0] )
if key_name.endswith("/b" ):
lowercase_ : str = "model.blocks.%d.self_attn.norm.bias" % player
lowercase_ : str = vnp.copy() # same because it is one dimensional
lowercase_ : int = torch.tensor(_UpperCamelCase )
elif key_name.endswith("/g" ):
lowercase_ : List[Any] = "model.blocks.%d.self_attn.norm.weight" % player
lowercase_ : Dict = vnp.copy() # same because it is one dimensional
lowercase_ : Optional[Any] = torch.tensor(_UpperCamelCase )
elif (
key_name.startswith("model/wte" )
or key_name.startswith("model/wpe" )
or key_name.startswith("model/ete" )
):
lowercase_ : Tuple = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
key_name[-3:]
]
lowercase_ : Tuple = "model.%s.weight" % nlayer
lowercase_ : Tuple = vnp.copy() # same in embedded
lowercase_ : Any = torch.tensor(_UpperCamelCase )
if key_name.startswith("model/wte" ):
lowercase_ : Any = "lm_head.weight"
lowercase_ : List[Any] = vnp.copy() # same in embedded
lowercase_ : int = torch.tensor(_UpperCamelCase )
elif key_name.startswith("model/wob" ):
lowercase_ : Any = "final_logits_bias"
lowercase_ : Any = vnp.copy() # same in embedded
lowercase_ : Optional[Any] = state.reshape((1, -1) )
lowercase_ : Any = torch.tensor(_UpperCamelCase )
elif key_name == "model/dense/kernel":
lowercase_ : Optional[Any] = "model.last_project.weight"
lowercase_ : str = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase_ : int = torch.tensor(_UpperCamelCase )
elif key_name == "model/dense_1/bias":
lowercase_ : List[Any] = "model.last_project.bias"
lowercase_ : Any = vnp.copy() # same because it is one dimensional
lowercase_ : int = torch.tensor(_UpperCamelCase )
torch.save(_UpperCamelCase , args.output )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser(
description='model converter.', formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('--tf_model_dir', metavar='PATH', type=str, required=True, help='import model')
parser.add_argument('--output', metavar='PATH', type=str, required=True, help='output model')
UpperCamelCase__ = parser.parse_args()
convert_tf_gptsan_to_pt(args)
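# --- Illustrative invocation (not part of the original file): the converter
# above is a CLI script; a typical call (hypothetical file name and paths)
# looks like this.
#
#   python convert_gptsan_tf_checkpoint_to_pytorch.py \
#       --tf_model_dir /path/to/tf_checkpoint_dir \
#       --output /path/to/converted_model.pt
#
# The script reads parameters.json and the TF checkpoint from --tf_model_dir,
# remaps every tensor into a PyTorch state dict, and saves it with torch.save.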
| 712
|
'''simple docstring'''
import heapq as hq
import math
from collections.abc import Iterator
class _UpperCAmelCase :
def __init__( self : Optional[Any] , a : Any ):
'''simple docstring'''
lowercase_ : List[Any] = str(id_ )
lowercase_ : List[str] = None
lowercase_ : Tuple = None
lowercase_ : Dict = []
lowercase_ : Union[str, Any] = {} # {vertex:distance}
def __lt__( self : Optional[Any] , a : int ):
'''simple docstring'''
return self.key < other.key
def __repr__( self : Union[str, Any] ):
'''simple docstring'''
return self.id
def lowerCAmelCase__ ( self : Union[str, Any] , a : Optional[int] ):
'''simple docstring'''
self.neighbors.append(a )
def lowerCAmelCase__ ( self : Dict , a : int , a : Optional[int] ):
'''simple docstring'''
lowercase_ : int = weight
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , _UpperCamelCase )
graph[b - 1].add_edge(graph[a - 1] , _UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Any = []
for u in graph:
lowercase_ : List[Any] = math.inf
lowercase_ : str = None
lowercase_ : Tuple = 0
lowercase_ : Tuple = graph[:]
while q:
lowercase_ : List[Any] = min(_UpperCamelCase )
q.remove(_UpperCamelCase )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
lowercase_ : Optional[int] = u
lowercase_ : Union[str, Any] = u.edges[v.id]
for i in range(1 , len(_UpperCamelCase ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
for u in graph:
lowercase_ : str = math.inf
lowercase_ : int = None
lowercase_ : List[Any] = 0
lowercase_ : str = list(_UpperCamelCase )
hq.heapify(_UpperCamelCase )
while h:
lowercase_ : List[Any] = hq.heappop(_UpperCamelCase )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
lowercase_ : str = u
lowercase_ : Optional[int] = u.edges[v.id]
hq.heapify(_UpperCamelCase )
for i in range(1 , len(_UpperCamelCase ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def __SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
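# --- Illustrative rewrite (not part of the original file): Prim's algorithm on
# a plain adjacency map {vertex: [(neighbor, weight), ...]}, using a binary
# heap of candidate edges instead of the Vertex class above. Assumes the graph
# is connected and undirected; returns the MST edges in insertion order.
import heapq

def prim_mst(adjacency: dict[int, list[tuple[int, int]]], start: int) -> list[tuple[int, int]]:
    visited = {start}
    edges = [(w, start, v) for v, w in adjacency[start]]
    heapq.heapify(edges)
    mst: list[tuple[int, int]] = []
    while edges and len(visited) < len(adjacency):
        weight, u, v = heapq.heappop(edges)
        if v in visited:
            continue
        visited.add(v)
        mst.append((u, v))
        for to, w in adjacency[v]:
            if to not in visited:
                heapq.heappush(edges, (w, v, to))
    return mst

graph = {1: [(2, 1), (3, 4)], 2: [(1, 1), (3, 2)], 3: [(1, 4), (2, 2)]}
assert prim_mst(graph, 1) == [(1, 2), (2, 3)]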
| 640
| 0
|
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = r'\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax\n or scores for each vocabulary token after SoftMax.\n kwargs (`Dict[str, Any]`, *optional*):\n Additional stopping criteria specific kwargs.\n\n Return:\n `bool`. `False` indicates we should continue, `True` indicates we should stop.\n\n'
class _UpperCAmelCase ( snake_case ):
@add_start_docstrings(a )
def __call__( self : Any , a : torch.LongTensor , a : torch.FloatTensor , **a : Dict ):
'''simple docstring'''
raise NotImplementedError("StoppingCriteria needs to be subclassed" )
class _UpperCAmelCase ( snake_case ):
def __init__( self : str , a : int , a : Optional[int] = None ):
'''simple docstring'''
lowercase_ : List[Any] = max_length
lowercase_ : List[Any] = max_position_embeddings
@add_start_docstrings(a )
def __call__( self : Union[str, Any] , a : torch.LongTensor , a : torch.FloatTensor , **a : List[str] ):
'''simple docstring'''
lowercase_ : Tuple = input_ids.shape[-1]
lowercase_ : str = cur_len >= self.max_length
if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
logger.warning_once(
"This is a friendly reminder - the current text generation call will exceed the model's predefined "
f"""maximum length ({self.max_position_embeddings}). Depending on the model, you may observe """
"exceptions, performance degradation, or nothing at all." )
return is_done
class _UpperCAmelCase ( snake_case ):
def __init__( self : List[Any] , a : int , a : int ):
'''simple docstring'''
warnings.warn(
"The class `MaxNewTokensCriteria` is deprecated. "
f"""Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` """
"with `max_length = start_length + max_new_tokens` instead." , a , )
lowercase_ : List[Any] = start_length
lowercase_ : Tuple = max_new_tokens
lowercase_ : str = start_length + max_new_tokens
@add_start_docstrings(a )
def __call__( self : int , a : torch.LongTensor , a : torch.FloatTensor , **a : Optional[int] ):
'''simple docstring'''
return input_ids.shape[-1] >= self.max_length
class _UpperCAmelCase ( snake_case ):
def __init__( self : Optional[int] , a : float , a : Optional[float] = None ):
'''simple docstring'''
lowercase_ : Tuple = max_time
lowercase_ : Optional[int] = time.time() if initial_timestamp is None else initial_timestamp
@add_start_docstrings(a )
def __call__( self : int , a : torch.LongTensor , a : torch.FloatTensor , **a : Optional[Any] ):
'''simple docstring'''
return time.time() - self.initial_timestamp > self.max_time
class _UpperCAmelCase ( snake_case ):
@add_start_docstrings(a )
def __call__( self : List[Any] , a : torch.LongTensor , a : torch.FloatTensor , **a : List[Any] ):
'''simple docstring'''
return any(criteria(a , a ) for criteria in self )
@property
def lowerCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
for stopping_criterium in self:
if isinstance(a , a ):
return stopping_criterium.max_length
elif isinstance(a , a ):
return stopping_criterium.max_length
return None
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : str = stopping_criteria.max_length
lowercase_ : List[str] = deepcopy(_UpperCamelCase )
if stopping_max_length is not None and stopping_max_length != max_length:
warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter" , _UpperCamelCase )
elif stopping_max_length is None:
new_stopping_criteria.append(MaxLengthCriteria(max_length=_UpperCamelCase ) )
return new_stopping_criteria
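# --- Illustrative usage (not part of the original file): combining the
# criteria above through the public transformers API. MaxLengthCriteria and
# StoppingCriteriaList are assumed to be the public names behind the
# obfuscated class definitions above; the tensors here are dummies.
import torch
from transformers import MaxLengthCriteria, StoppingCriteriaList

criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=10)])
input_ids = torch.ones((1, 10), dtype=torch.long)
scores = torch.zeros((1, 100))
assert criteria(input_ids, scores)  # True: the sequence reached max_length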
| 713
|
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Any = False
while is_sorted is False: # Until all the indices are traversed keep looping
lowercase_ : List[str] = True
for i in range(0 , len(_UpperCamelCase ) - 1 , 2 ): # iterating over all even indices
if input_list[i] > input_list[i + 1]:
lowercase_ , lowercase_ : Union[str, Any] = input_list[i + 1], input_list[i]
# swapping if elements not in order
lowercase_ : Any = False
for i in range(1 , len(_UpperCamelCase ) - 1 , 2 ): # iterating over all odd indices
if input_list[i] > input_list[i + 1]:
lowercase_ , lowercase_ : Tuple = input_list[i + 1], input_list[i]
# swapping if elements not in order
lowercase_ : List[Any] = False
return input_list
if __name__ == "__main__":
print('Enter list to be sorted')
UpperCamelCase__ = [int(x) for x in input().split()]
# inputing elements of the list in one line
UpperCamelCase__ = odd_even_sort(input_list)
print('The sorted list is')
print(sorted_list)
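# --- Illustrative rewrite (not part of the original file): the same odd-even
# (brick) transposition sort with readable names, folding the even and odd
# passes into one loop. Sorts in place and returns the list.
def odd_even_transposition_sort(items: list[int]) -> list[int]:
    is_sorted = False
    while not is_sorted:
        is_sorted = True
        for start in (0, 1):  # even-indexed pass, then odd-indexed pass
            for i in range(start, len(items) - 1, 2):
                if items[i] > items[i + 1]:
                    items[i], items[i + 1] = items[i + 1], items[i]
                    is_sorted = False
    return items

assert odd_even_transposition_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]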
| 640
| 0
|
'''simple docstring'''
import math
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(_UpperCamelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase = 1_0001 ):
"""simple docstring"""
try:
lowercase_ : List[Any] = int(_UpperCamelCase )
except (TypeError, ValueError):
raise TypeError("Parameter nth must be int or castable to int." ) from None
if nth <= 0:
raise ValueError("Parameter nth must be greater than or equal to one." )
lowercase_ : list[int] = []
lowercase_ : Dict = 2
while len(_UpperCamelCase ) < nth:
if is_prime(_UpperCamelCase ):
primes.append(_UpperCamelCase )
num += 1
else:
num += 1
return primes[len(_UpperCamelCase ) - 1]
if __name__ == "__main__":
print(f"""{solution() = }""")
| 714
|
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Dict = 0
lowercase_ : Optional[Any] = len(_UpperCamelCase ) # No of vertices in graph
lowercase_ : Union[str, Any] = [0] * n
lowercase_ : Optional[int] = [False] * n
def dfs(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
lowercase_ : Union[str, Any] = True
lowercase_ : Dict = id_
id_ += 1
for to in graph[at]:
if to == parent:
pass
elif not visited[to]:
dfs(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , id_ )
lowercase_ : str = min(low[at] , low[to] )
if id_ <= low[to]:
bridges.append((at, to) if at < to else (to, at) )
else:
# This edge is a back edge and cannot be a bridge
lowercase_ : Optional[int] = min(low[at] , low[to] )
lowercase_ : list[tuple[int, int]] = []
for i in range(_UpperCamelCase ):
if not visited[i]:
dfs(_UpperCamelCase , -1 , _UpperCamelCase , id_ )
return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
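# --- Illustrative rewrite (not part of the original file): the same
# Tarjan-style bridge finder with readable names and the discovery counter
# kept as an explicit nonlocal instead of a re-bound closure variable.
def find_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]:
    n = len(graph)
    low = [0] * n
    discovery = [0] * n
    visited = [False] * n
    bridges: list[tuple[int, int]] = []
    counter = 0

    def dfs(at: int, parent: int) -> None:
        nonlocal counter
        visited[at] = True
        low[at] = discovery[at] = counter
        counter += 1
        for to in graph[at]:
            if to == parent:
                continue
            if not visited[to]:
                dfs(to, at)
                low[at] = min(low[at], low[to])
                if discovery[at] < low[to]:  # no back edge reaches above `at`
                    bridges.append((at, to) if at < to else (to, at))
            else:
                low[at] = min(low[at], discovery[to])

    for vertex in range(n):
        if not visited[vertex]:
            dfs(vertex, -1)
    return bridges

assert sorted(find_bridges({0: [1], 1: [0, 2], 2: [1]})) == [(0, 1), (1, 2)]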
| 640
| 0
|
'''simple docstring'''
import pytest
import datasets
# Import fixture modules as plugins
UpperCamelCase__ = ['tests.fixtures.files', 'tests.fixtures.hub', 'tests.fixtures.fsspec']
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
for item in items:
if any(marker in item.keywords for marker in ["integration", "unit"] ):
continue
item.add_marker(pytest.mark.unit )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
config.addinivalue_line("markers" , "torchaudio_latest: mark test to run with torchaudio>=0.12" )
@pytest.fixture(autouse=_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Any = tmp_path_factory.getbasetemp() / "cache"
lowercase_ : List[Any] = test_hf_cache_home / "datasets"
lowercase_ : Optional[Any] = test_hf_cache_home / "metrics"
lowercase_ : str = test_hf_cache_home / "modules"
monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE" , str(_UpperCamelCase ) )
monkeypatch.setattr("datasets.config.HF_METRICS_CACHE" , str(_UpperCamelCase ) )
monkeypatch.setattr("datasets.config.HF_MODULES_CACHE" , str(_UpperCamelCase ) )
lowercase_ : Union[str, Any] = test_hf_datasets_cache / "downloads"
monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH" , str(_UpperCamelCase ) )
lowercase_ : List[str] = test_hf_datasets_cache / "downloads" / "extracted"
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH" , str(_UpperCamelCase ) )
@pytest.fixture(autouse=_UpperCamelCase , scope="session" )
def __SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
datasets.disable_progress_bar()
@pytest.fixture(autouse=_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS" , _UpperCamelCase )
@pytest.fixture
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING" , _UpperCamelCase )
| 715
|
'''simple docstring'''
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
UpperCamelCase__ = 'scheduler_config.json'
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: int = 1
__lowerCamelCase: List[Any] = 2
__lowerCamelCase: Optional[Any] = 3
__lowerCamelCase: int = 4
__lowerCamelCase: Optional[int] = 5
@dataclass
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: jnp.ndarray
class _UpperCAmelCase :
__lowerCamelCase: List[str] = SCHEDULER_CONFIG_NAME
__lowerCamelCase: Optional[int] = ['dtype']
__lowerCamelCase: int = []
__lowerCamelCase: Dict = True
@classmethod
def lowerCAmelCase__ ( cls : Tuple , a : Dict[str, Any] = None , a : Optional[str] = None , a : Union[str, Any]=False , **a : Union[str, Any] , ):
'''simple docstring'''
lowercase_ , lowercase_ : Any = cls.load_config(
pretrained_model_name_or_path=a , subfolder=a , return_unused_kwargs=a , **a , )
lowercase_ , lowercase_ : Union[str, Any] = cls.from_config(a , return_unused_kwargs=a , **a )
if hasattr(a , "create_state" ) and getattr(a , "has_state" , a ):
lowercase_ : Tuple = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def lowerCAmelCase__ ( self : int , a : Union[str, os.PathLike] , a : bool = False , **a : int ):
'''simple docstring'''
self.save_config(save_directory=a , push_to_hub=a , **a )
@property
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
return self._get_compatibles()
@classmethod
def lowerCAmelCase__ ( cls : Dict ):
'''simple docstring'''
lowercase_ : str = list(set([cls.__name__] + cls._compatibles ) )
lowercase_ : str = importlib.import_module(__name__.split("." )[0] )
lowercase_ : Optional[Any] = [
getattr(a , a ) for c in compatible_classes_str if hasattr(a , a )
]
return compatible_classes
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
assert len(_UpperCamelCase ) >= x.ndim
return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(_UpperCamelCase ) - x.ndim) ) , _UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase=0.999 , _UpperCamelCase=jnp.floataa ):
"""simple docstring"""
def alpha_bar(_UpperCamelCase ):
return math.cos((time_step + 0.008) / 1.008 * math.pi / 2 ) ** 2
lowercase_ : int = []
for i in range(_UpperCamelCase ):
lowercase_ : Union[str, Any] = i / num_diffusion_timesteps
lowercase_ : Dict = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(_UpperCamelCase ) / alpha_bar(_UpperCamelCase ) , _UpperCamelCase ) )
return jnp.array(_UpperCamelCase , dtype=_UpperCamelCase )
@flax.struct.dataclass
class _UpperCAmelCase :
__lowerCamelCase: jnp.ndarray
__lowerCamelCase: jnp.ndarray
__lowerCamelCase: jnp.ndarray
@classmethod
def lowerCAmelCase__ ( cls : Tuple , a : Optional[int] ):
'''simple docstring'''
lowercase_ : Any = scheduler.config
if config.trained_betas is not None:
lowercase_ : Union[str, Any] = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
elif config.beta_schedule == "linear":
lowercase_ : List[Any] = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
lowercase_ : Tuple = (
jnp.linspace(
config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
lowercase_ : Union[str, Any] = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
else:
raise NotImplementedError(
f"""beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}""" )
lowercase_ : str = 1.0 - betas
lowercase_ : Dict = jnp.cumprod(a , axis=0 )
return cls(
alphas=a , betas=a , alphas_cumprod=a , )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : List[str] = state.alphas_cumprod
lowercase_ : Optional[Any] = alphas_cumprod[timesteps] ** 0.5
lowercase_ : int = sqrt_alpha_prod.flatten()
lowercase_ : Union[str, Any] = broadcast_to_shape_from_left(_UpperCamelCase , original_samples.shape )
lowercase_ : Optional[int] = (1 - alphas_cumprod[timesteps]) ** 0.5
lowercase_ : Union[str, Any] = sqrt_one_minus_alpha_prod.flatten()
lowercase_ : Any = broadcast_to_shape_from_left(_UpperCamelCase , original_samples.shape )
return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ , lowercase_ : int = get_sqrt_alpha_prod(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
lowercase_ : str = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ , lowercase_ : int = get_sqrt_alpha_prod(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
lowercase_ : Any = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
return velocity
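# --- Illustrative usage (not part of the original file): the cosine-schedule
# helper above (assumed name: betas_for_alpha_bar, obscured by the
# transformation) returns one beta per diffusion timestep, capped at 0.999.
betas = betas_for_alpha_bar(1000)
assert betas.shape == (1000,)
assert float(betas.max()) <= 0.999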
| 640
| 0
|
'''simple docstring'''
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase=False ):
"""simple docstring"""
try:
lowercase_ : List[str] = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
lowercase_ : Union[str, Any] = default
else:
# KEY is set, convert it to True or False.
try:
lowercase_ : Tuple = strtobool(_UpperCamelCase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"""If set, {key} must be yes or no.""" )
return _value
UpperCamelCase__ = parse_flag_from_env('RUN_SLOW', default=False)
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return unittest.skip("Test was skipped" )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return unittest.skipUnless(_run_slow_tests , "test is slow" )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return unittest.skipUnless(not torch.cuda.is_available() , "test requires only a CPU" )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return unittest.skipUnless(torch.cuda.is_available() , "test requires a GPU" )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return unittest.skipUnless(is_xpu_available() , "test requires a XPU" )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return unittest.skipUnless(is_mps_available() , "test requires a `mps` backend support in `torch`" )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , "test requires the Hugging Face suite" )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return unittest.skipUnless(is_bnb_available() , "test requires the bitsandbytes library" )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return unittest.skipUnless(is_tpu_available() , "test requires TPU" )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return unittest.skipUnless(torch.cuda.device_count() == 1 , "test requires a GPU" )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return unittest.skipUnless(torch.xpu.device_count() == 1 , "test requires a XPU" )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return unittest.skipUnless(torch.cuda.device_count() > 1 , "test requires multiple GPUs" )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return unittest.skipUnless(torch.xpu.device_count() > 1 , "test requires multiple XPUs" )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return unittest.skipUnless(is_safetensors_available() , "test requires safetensors" )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return unittest.skipUnless(is_deepspeed_available() , "test requires DeepSpeed" )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return unittest.skipUnless(is_torch_version(">=" , "1.12.0" ) , "test requires torch version >= 1.12.0" )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase=None , _UpperCamelCase=None ):
"""simple docstring"""
if test_case is None:
return partial(_UpperCamelCase , version=_UpperCamelCase )
return unittest.skipUnless(is_torch_version(">=" , _UpperCamelCase ) , F"""test requires torch version >= {version}""" )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return unittest.skipUnless(is_tensorboard_available() , "test requires Tensorboard" )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return unittest.skipUnless(is_wandb_available() , "test requires wandb" )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return unittest.skipUnless(is_comet_ml_available() , "test requires comet_ml" )(_UpperCamelCase )
UpperCamelCase__ = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return unittest.skipUnless(
_atleast_one_tracker_available , "test requires at least one tracker to be available and for `comet_ml` to not be installed" , )(_UpperCamelCase )
class _UpperCAmelCase ( unittest.TestCase ):
__lowerCamelCase: Union[str, Any] = True
@classmethod
def lowerCAmelCase__ ( cls : Union[str, Any] ):
'''simple docstring'''
lowercase_ : List[Any] = tempfile.mkdtemp()
@classmethod
def lowerCAmelCase__ ( cls : List[str] ):
'''simple docstring'''
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob("**/*" ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(a )
class _UpperCAmelCase ( unittest.TestCase ):
def lowerCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class _UpperCAmelCase ( unittest.TestCase ):
def lowerCAmelCase__ ( self : List[Any] , a : Union[mock.Mock, List[mock.Mock]] ):
'''simple docstring'''
lowercase_ : int = mocks if isinstance(a , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Optional[Any] = AcceleratorState()
lowercase_ : List[Any] = tensor[None].clone().to(state.device )
lowercase_ : Any = gather(_UpperCamelCase ).cpu()
lowercase_ : Union[str, Any] = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , _UpperCamelCase ):
return False
return True
class _UpperCAmelCase :
def __init__( self : List[Any] , a : str , a : Any , a : List[Any] ):
'''simple docstring'''
lowercase_ : Dict = returncode
lowercase_ : str = stdout
lowercase_ : List[Any] = stderr
async def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
while True:
lowercase_ : Optional[int] = await stream.readline()
if line:
callback(_UpperCamelCase )
else:
break
async def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=False , _UpperCamelCase=False ):
"""simple docstring"""
if echo:
print("\nRunning: " , " ".join(_UpperCamelCase ) )
lowercase_ : int = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=_UpperCamelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_UpperCamelCase , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
lowercase_ : Tuple = []
lowercase_ : Any = []
def tee(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase="" ):
lowercase_ : List[Any] = line.decode("utf-8" ).rstrip()
sink.append(_UpperCamelCase )
if not quiet:
print(_UpperCamelCase , _UpperCamelCase , file=_UpperCamelCase )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda _UpperCamelCase : tee(_UpperCamelCase , _UpperCamelCase , sys.stdout , label="stdout:" ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda _UpperCamelCase : tee(_UpperCamelCase , _UpperCamelCase , sys.stderr , label="stderr:" ) ) ),
] , timeout=_UpperCamelCase , )
return _RunOutput(await p.wait() , _UpperCamelCase , _UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=180 , _UpperCamelCase=False , _UpperCamelCase=True ):
"""simple docstring"""
lowercase_ : Dict = asyncio.get_event_loop()
lowercase_ : List[str] = loop.run_until_complete(
_stream_subprocess(_UpperCamelCase , env=_UpperCamelCase , stdin=_UpperCamelCase , timeout=_UpperCamelCase , quiet=_UpperCamelCase , echo=_UpperCamelCase ) )
lowercase_ : Optional[int] = " ".join(_UpperCamelCase )
if result.returncode > 0:
lowercase_ : str = "\n".join(result.stderr )
raise RuntimeError(
F"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
F"""The combined stderr from workers follows:\n{stderr}""" )
return result
class _UpperCAmelCase ( snake_case ):
pass
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase=False ):
"""simple docstring"""
try:
lowercase_ : List[Any] = subprocess.check_output(_UpperCamelCase , stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(_UpperCamelCase , "decode" ):
lowercase_ : List[str] = output.decode("utf-8" )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
F"""Command `{" ".join(_UpperCamelCase )}` failed with the following error:\n\n{e.output.decode()}""" ) from e
| 716
|
'''simple docstring'''
import heapq
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : list[list] = []
# for each node and its adjacency list, add them and the node's rank to the queue
# using the heapq module, the queue is filled like a priority queue
# heapq implements a min-priority queue, so -1 * len(v) is used to get max-rank behavior
for key, value in graph.items():
# O(log(n))
heapq.heappush(_UpperCamelCase , [-1 * len(_UpperCamelCase ), (key, value)] )
# chosen_vertices = set of chosen vertices
lowercase_ : Optional[Any] = set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
lowercase_ : Any = heapq.heappop(_UpperCamelCase )[1][0]
chosen_vertices.add(_UpperCamelCase )
# Remove all arcs adjacent to argmax
for elem in queue:
# if the vertex has no adjacent nodes, skip
if elem[0] == 0:
continue
# if argmax is reachable from elem
# remove argmax from elem's adjacency list and update its rank
if argmax in elem[1][1]:
lowercase_ : str = elem[1][1].index(_UpperCamelCase )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(_UpperCamelCase )
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase__ = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 640
| 0
|
'''simple docstring'''
import gc
import threading
import time
import psutil
import torch
class _UpperCAmelCase :
def __init__( self : Dict ):
'''simple docstring'''
lowercase_ : Tuple = psutil.Process()
lowercase_ : Union[str, Any] = False
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
lowercase_ : Tuple = -1
while True:
lowercase_ : List[Any] = max(self.process.memory_info().rss , self.cpu_memory_peak )
# do not sleep here, or the true peak may be missed (the busy loop is intentional)
if not self.peak_monitoring:
break
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ : Optional[int] = True
lowercase_ : Any = threading.Thread(target=self.peak_monitor )
lowercase_ : Optional[int] = True
self.thread.start()
def lowerCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
lowercase_ : Any = False
self.thread.join()
return self.cpu_memory_peak
UpperCamelCase__ = PeakCPUMemory()
def __SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
lowercase_ : Tuple = {"time": time.time()}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
lowercase_ : Dict = psutil.Process().memory_info().rss
cpu_peak_tracker.start()
# GPU mem
for i in range(torch.cuda.device_count() ):
lowercase_ : Union[str, Any] = torch.cuda.memory_allocated(_UpperCamelCase )
torch.cuda.reset_peak_memory_stats()
return measures
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : List[Any] = {"time": time.time() - start_measures["time"]}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
lowercase_ : Any = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
lowercase_ : int = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20
# GPU mem
for i in range(torch.cuda.device_count() ):
lowercase_ : List[str] = (torch.cuda.memory_allocated(_UpperCamelCase ) - start_measures[str(_UpperCamelCase )]) / 2**20
lowercase_ : Optional[Any] = (torch.cuda.max_memory_allocated(_UpperCamelCase ) - start_measures[str(_UpperCamelCase )]) / 2**20
return measures
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
print(F"""{description}:""" )
print(F"""- Time: {measures["time"]:.2f}s""" )
for i in range(torch.cuda.device_count() ):
print(F"""- GPU {i} allocated: {measures[str(_UpperCamelCase )]:.2f}MiB""" )
lowercase_ : List[str] = measures[F"""{i}-peak"""]
print(F"""- GPU {i} peak: {peak:.2f}MiB""" )
print(F"""- CPU RAM allocated: {measures["cpu"]:.2f}MiB""" )
print(F"""- CPU RAM peak: {measures["cpu-peak"]:.2f}MiB""" )
| 717
|
'''simple docstring'''
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
'pipelines_utils',
'0.22.0',
'Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.',
standard_warn=False,
stacklevel=3,
)
| 640
| 0
|
'''simple docstring'''
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'--original_config_file',
default=None,
type=str,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--scheduler_type',
default='pndm',
type=str,
help='Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']',
)
parser.add_argument(
'--pipeline_type',
default=None,
type=str,
help=(
'The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''
'. If `None` pipeline will be automatically inferred.'
),
)
parser.add_argument(
'--image_size',
default=None,
type=int,
help=(
'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--prediction_type',
default=None,
type=str,
help=(
'The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'
' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
parser.add_argument(
'--stable_unclip',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.',
)
parser.add_argument(
'--stable_unclip_prior',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.',
)
parser.add_argument(
'--clip_stats_path',
type=str,
help='Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.',
required=False,
)
parser.add_argument(
'--controlnet', action='store_true', default=None, help='Set flag if this is a controlnet checkpoint.'
)
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--vae_path',
type=str,
default=None,
required=False,
help='Set to a path, hub id to an already converted vae to not convert it again.',
)
UpperCamelCase__ = parser.parse_args()
UpperCamelCase__ = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
pipe.to(torch_dtype=torch.floataa)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
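# --- Illustrative invocation (not part of the original file): a typical call
# to the conversion script above (hypothetical file name and paths).
#
#   python convert_original_stable_diffusion_to_diffusers.py \
#       --checkpoint_path ./v1-5-pruned.ckpt \
#       --original_config_file ./v1-inference.yaml \
#       --dump_path ./stable-diffusion-v1-5 \
#       --extract_ema
#
# Add --half to save fp16 weights; with --controlnet only the ControlNet
# submodule is saved instead of the full pipeline.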
| 718
|
'''simple docstring'''
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
UpperCamelCase__ = {
'cola': 2,
'mnli': 3,
'mrpc': 2,
'sst-2': 2,
'sts-b': 1,
'qqp': 2,
'qnli': 2,
'rte': 2,
'wnli': 2,
}
logging.set_verbosity_info()
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None ):
"""simple docstring"""
lowercase_ : Tuple = XLNetConfig.from_json_file(_UpperCamelCase )
lowercase_ : Union[str, Any] = finetuning_task.lower() if finetuning_task is not None else ""
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(F"""Building PyTorch XLNetForSequenceClassification model from configuration: {config}""" )
lowercase_ : Dict = finetuning_task
lowercase_ : Any = GLUE_TASKS_NUM_LABELS[finetuning_task]
lowercase_ : Any = XLNetForSequenceClassification(_UpperCamelCase )
elif "squad" in finetuning_task:
lowercase_ : Optional[int] = finetuning_task
lowercase_ : Optional[int] = XLNetForQuestionAnswering(_UpperCamelCase )
else:
lowercase_ : Union[str, Any] = XLNetLMHeadModel(_UpperCamelCase )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save pytorch-model
lowercase_ : Optional[Any] = os.path.join(_UpperCamelCase , _UpperCamelCase )
lowercase_ : Dict = os.path.join(_UpperCamelCase , _UpperCamelCase )
print(F"""Save PyTorch model to {os.path.abspath(_UpperCamelCase )}""" )
torch.save(model.state_dict() , _UpperCamelCase )
print(F"""Save configuration file to {os.path.abspath(_UpperCamelCase )}""" )
with open(_UpperCamelCase , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--xlnet_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained XLNet model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--finetuning_task',
default=None,
type=str,
help='Name of a task on which the XLNet TensorFlow model was fine-tuned',
)
UpperCamelCase__ = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
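# --- Illustrative invocation (not part of the original file): a typical call
# to the XLNet conversion script above (hypothetical file name and paths).
#
#   python convert_xlnet_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./xlnet_model.ckpt \
#       --xlnet_config_file ./xlnet_config.json \
#       --pytorch_dump_folder_path ./xlnet-finetuned \
#       --finetuning_task sts-b
#
# Passing a GLUE task name selects XLNetForSequenceClassification with the
# matching number of labels; a task containing "squad" selects
# XLNetForQuestionAnswering; otherwise a plain XLNetLMHeadModel is built.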
| 640
| 0
|
'''simple docstring'''
from string import ascii_uppercase
UpperCamelCase__ = {char: i for i, char in enumerate(ascii_uppercase)}
UpperCamelCase__ = dict(enumerate(ascii_uppercase))
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Optional[int] = len(_UpperCamelCase )
lowercase_ : str = 0
while True:
if x == i:
lowercase_ : Dict = 0
if len(_UpperCamelCase ) == len(_UpperCamelCase ):
break
key += key[i]
i += 1
return key
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Union[str, Any] = ""
lowercase_ : Optional[Any] = 0
for letter in message:
if letter == " ":
cipher_text += " "
else:
lowercase_ : Optional[Any] = (dicta[letter] - dicta[key_new[i]]) % 26
i += 1
cipher_text += dicta[x]
return cipher_text
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Tuple = ""
lowercase_ : Optional[Any] = 0
for letter in cipher_text:
if letter == " ":
or_txt += " "
else:
lowercase_ : Optional[Any] = (dicta[letter] + dicta[key_new[i]] + 26) % 26
i += 1
or_txt += dicta[x]
return or_txt
def __SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
lowercase_ : str = "THE GERMAN ATTACK"
lowercase_ : Dict = "SECRET"
lowercase_ : Any = generate_key(_UpperCamelCase , _UpperCamelCase )
lowercase_ : Union[str, Any] = cipher_text(_UpperCamelCase , _UpperCamelCase )
print(F"""Encrypted Text = {s}""" )
print(F"""Original Text = {original_text(_UpperCamelCase , _UpperCamelCase )}""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
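# --- Illustrative rewrite (not part of the original file): the same Vigenere
# scheme with readable names. The direction matches the transformed code
# above: the key is subtracted to encrypt and added to decrypt, and spaces
# pass through without consuming a key letter.
from string import ascii_uppercase

ALPHA = {char: i for i, char in enumerate(ascii_uppercase)}

def vigenere(message: str, key: str, encrypt: bool) -> str:
    out, key_index = [], 0
    for letter in message:
        if letter == " ":
            out.append(" ")
            continue
        shift = ALPHA[key[key_index % len(key)]]
        offset = -shift if encrypt else shift
        out.append(ascii_uppercase[(ALPHA[letter] + offset) % 26])
        key_index += 1
    return "".join(out)

cipher = vigenere("THE GERMAN ATTACK", "SECRET", encrypt=True)
assert vigenere(cipher, "SECRET", encrypt=False) == "THE GERMAN ATTACK"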
| 719
|
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
if number < 0:
raise ValueError("number must not be negative" )
return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
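# --- Illustrative note (not part of the original file): the check above uses
# the classic bit trick: a positive power of two has exactly one set bit, so
# n & (n - 1) clears it and yields zero. Zero also passes, as in the original.
assert all((n & (n - 1)) == 0 for n in (0, 1, 2, 4, 1024))
assert all((n & (n - 1)) != 0 for n in (3, 6, 100))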
| 640
| 0
|
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Optional[Any] = filter(lambda _UpperCamelCase : p.requires_grad , model.parameters() )
lowercase_ : List[str] = sum([np.prod(p.size() ) for p in model_parameters] )
return params
UpperCamelCase__ = logging.getLogger(__name__)
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
if metric == "rouge2":
lowercase_ : Optional[int] = "{val_avg_rouge2:.4f}-{step_count}"
elif metric == "bleu":
lowercase_ : Union[str, Any] = "{val_avg_bleu:.4f}-{step_count}"
elif metric == "em":
lowercase_ : Tuple = "{val_avg_em:.4f}-{step_count}"
elif metric == "loss":
lowercase_ : Any = "{val_avg_loss:.4f}-{step_count}"
else:
raise NotImplementedError(
F"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"""
" function." )
lowercase_ : List[str] = ModelCheckpoint(
dirpath=_UpperCamelCase , filename=_UpperCamelCase , monitor=F"""val_{metric}""" , mode="max" , save_top_k=1 , every_n_epochs=1 , )
return checkpoint_callback
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
return EarlyStopping(
monitor=F"""val_{metric}""" , mode="min" if "loss" in metric else "max" , patience=_UpperCamelCase , verbose=_UpperCamelCase , )
class _UpperCAmelCase ( pl.Callback ):
def lowerCAmelCase__ ( self : Union[str, Any] , a : List[str] , a : int ):
'''simple docstring'''
lowercase_ : Dict = {f"""lr_group_{i}""": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(a )
@rank_zero_only
def lowerCAmelCase__ ( self : List[str] , a : pl.Trainer , a : pl.LightningModule , a : str , a : Optional[Any]=True ):
'''simple docstring'''
logger.info(f"""***** {type_path} results at step {trainer.global_step:05d} *****""" )
lowercase_ : Optional[int] = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} )
# Log results
lowercase_ : str = Path(pl_module.hparams.output_dir )
if type_path == "test":
lowercase_ : List[Any] = od / "test_results.txt"
lowercase_ : Dict = od / "test_generations.txt"
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
lowercase_ : List[str] = od / f"""{type_path}_results/{trainer.global_step:05d}.txt"""
lowercase_ : List[Any] = od / f"""{type_path}_generations/{trainer.global_step:05d}.txt"""
results_file.parent.mkdir(exist_ok=a )
generations_file.parent.mkdir(exist_ok=a )
with open(a , "a+" ) as writer:
for key in sorted(a ):
if key in ["log", "progress_bar", "preds"]:
continue
lowercase_ : List[str] = metrics[key]
if isinstance(a , torch.Tensor ):
lowercase_ : Dict = val.item()
lowercase_ : int = f"""{key}: {val:.6f}\n"""
writer.write(a )
if not save_generations:
return
if "preds" in metrics:
lowercase_ : str = "\n".join(metrics["preds"] )
generations_file.open("w+" ).write(a )
@rank_zero_only
def lowerCAmelCase__ ( self : str , a : Optional[Any] , a : Any ):
'''simple docstring'''
try:
lowercase_ : Optional[int] = pl_module.model.model.num_parameters()
except AttributeError:
lowercase_ : List[Any] = pl_module.model.num_parameters()
lowercase_ : Union[str, Any] = count_trainable_parameters(a )
# mp stands for million parameters
trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6} )
@rank_zero_only
def lowerCAmelCase__ ( self : Union[str, Any] , a : pl.Trainer , a : pl.LightningModule ):
'''simple docstring'''
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(a , a , "test" )
@rank_zero_only
def lowerCAmelCase__ ( self : Any , a : pl.Trainer , a : Optional[int] ):
'''simple docstring'''
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 720
|
'''simple docstring'''
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase__ = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
UpperCamelCase__ = 50003
UpperCamelCase__ = 50002
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( snake_case , unittest.TestCase ):
__lowerCamelCase: Optional[int] = PLBartTokenizer
__lowerCamelCase: Any = None
__lowerCamelCase: Dict = False
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
lowercase_ : Any = PLBartTokenizer(a , language_codes="base" , keep_accents=a )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ : List[str] = PLBartTokenizer(a , language_codes="base" , keep_accents=a )
lowercase_ : List[str] = tokenizer.tokenize("This is a test" )
self.assertListEqual(a , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
lowercase_ : List[str] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
lowercase_ : Union[str, Any] = tokenizer.convert_tokens_to_ids(a )
self.assertListEqual(
a , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
lowercase_ : Optional[int] = tokenizer.convert_ids_to_tokens(a )
self.assertListEqual(
a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
lowercase_ : Dict = tokenizer.vocab_size
lowercase_ : str = [tokenizer.convert_ids_to_tokens(a ) for x in range(end - 4 , a )]
self.assertListEqual(a , ["__java__", "__python__", "__en_XX__", "<mask>"] )
lowercase_ : int = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
lowercase_ : str = tokenizer(a ).input_ids
self.assertEqual(
tokenizer.decode(a , skip_special_tokens=a , clean_up_tokenization_spaces=a ) , a , )
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
lowercase_ : str = PLBartTokenizer(a , language_codes="multi" , keep_accents=a )
lowercase_ : List[str] = tokenizer.tokenize("This is a test" )
self.assertListEqual(a , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
lowercase_ : Optional[Any] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
lowercase_ : Optional[Any] = tokenizer.convert_tokens_to_ids(a )
self.assertListEqual(
a , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
lowercase_ : List[str] = tokenizer.convert_ids_to_tokens(a )
self.assertListEqual(
a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
lowercase_ : Dict = tokenizer.vocab_size
lowercase_ : Union[str, Any] = [tokenizer.convert_ids_to_tokens(a ) for x in range(end - 7 , a )]
self.assertListEqual(
a , ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"] )
lowercase_ : Any = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
lowercase_ : List[Any] = tokenizer(a ).input_ids
self.assertEqual(
tokenizer.decode(a , skip_special_tokens=a , clean_up_tokenization_spaces=a ) , a , )
@require_torch
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( unittest.TestCase ):
__lowerCamelCase: int = 'uclanlp/plbart-python-en_XX'
__lowerCamelCase: Tuple = [
'def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])',
'def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])',
]
__lowerCamelCase: List[str] = [
'Returns the maximum value of a b c.',
'Sums the values of a b c.',
]
__lowerCamelCase: List[str] = [
134,
5452,
3_3460,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
988,
20,
3_3456,
19,
3_3456,
771,
39,
4258,
889,
3318,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
2471,
2,
PYTHON_CODE,
]
@classmethod
def lowerCAmelCase__ ( cls : str ):
'''simple docstring'''
lowercase_ : PLBartTokenizer = PLBartTokenizer.from_pretrained(
cls.checkpoint_name , language_codes="base" , src_lang="python" , tgt_lang="en_XX" )
lowercase_ : List[str] = 1
return cls
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"] , 5_0_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"] , 5_0_0_0_2 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"] , 5_0_0_0_3 )
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
lowercase_ : int = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , a )
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
self.assertIn(a , self.tokenizer.all_special_ids )
lowercase_ : List[Any] = [EN_CODE, 9_0_3_7, 3_3_4_4_2, 5_7, 7_5_2, 1_5_3, 1_4, 5_6, 1_8, 9, 2]
lowercase_ : Optional[int] = self.tokenizer.decode(a , skip_special_tokens=a )
lowercase_ : Any = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=a )
self.assertEqual(a , a )
self.assertNotIn(self.tokenizer.eos_token , a )
def lowerCAmelCase__ ( self : Dict ):
'''simple docstring'''
lowercase_ : Optional[int] = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 2_0]
self.assertIsInstance(src_text[0] , a )
lowercase_ : Tuple = 1_0
lowercase_ : int = self.tokenizer(a , max_length=a , truncation=a ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , a )
self.assertEqual(len(a ) , a )
def lowerCAmelCase__ ( self : Dict ):
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"] ) , [5_0_0_0_4, 5_0_0_0_1] )
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
lowercase_ : Optional[int] = tempfile.mkdtemp()
lowercase_ : List[str] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(a )
lowercase_ : Tuple = PLBartTokenizer.from_pretrained(a )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , a )
@require_torch
def lowerCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ : int = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=a , return_tensors="pt" )
lowercase_ : List[str] = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , a )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
lowercase_ : Union[str, Any] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=a , truncation=a , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
lowercase_ : Dict = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
self.assertIsInstance(a , a )
self.assertEqual((2, 2_6) , batch.input_ids.shape )
self.assertEqual((2, 2_6) , batch.attention_mask.shape )
lowercase_ : Dict = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , a )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
lowercase_ : List[str] = self.tokenizer(self.src_text , padding=a , truncation=a , max_length=3 , return_tensors="pt" )
lowercase_ : List[str] = self.tokenizer(
text_target=self.tgt_text , padding=a , truncation=a , max_length=1_0 , return_tensors="pt" )
lowercase_ : Dict = targets["input_ids"]
lowercase_ : str = shift_tokens_right(a , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )
@require_torch
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
lowercase_ : List[str] = self.tokenizer._build_translation_inputs(
"A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="java" )
self.assertEqual(
nested_simplify(a ) , {
# A, test, EOS, en_XX
"input_ids": [[1_5_0, 2_4_2, 2, 5_0_0_0_3]],
"attention_mask": [[1, 1, 1, 1]],
# java
"forced_bos_token_id": 5_0_0_0_1,
} , )
| 640
| 0
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
class _UpperCAmelCase ( snake_case ):
pass
class _UpperCAmelCase :
def __init__( self : int , a : Any ):
'''simple docstring'''
lowercase_ : Any = data
lowercase_ : Node | None = None
def __iter__( self : Any ):
'''simple docstring'''
lowercase_ : Tuple = self
lowercase_ : List[Any] = []
while node:
if node in visited:
raise ContainsLoopError
visited.append(a )
yield node.data
lowercase_ : str = node.next_node
@property
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
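        # Iterating the list raises ContainsLoopError as soon as a node repeats.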
try:
list(self )
return False
except ContainsLoopError:
return True
if __name__ == "__main__":
UpperCamelCase__ = Node(1)
UpperCamelCase__ = Node(2)
UpperCamelCase__ = Node(3)
UpperCamelCase__ = Node(4)
print(root_node.has_loop) # False
UpperCamelCase__ = root_node.next_node
print(root_node.has_loop) # True
UpperCamelCase__ = Node(5)
UpperCamelCase__ = Node(6)
UpperCamelCase__ = Node(5)
UpperCamelCase__ = Node(6)
print(root_node.has_loop) # False
UpperCamelCase__ = Node(1)
print(root_node.has_loop) # False
| 721
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
UpperCamelCase__ = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 640
| 0
|
'''simple docstring'''
from numpy import exp, pi, sqrt
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase = 0.0 , _UpperCamelCase = 1.0 ):
"""simple docstring"""
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 700
|
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: Tuple = 'linear'
__lowerCamelCase: Any = 'cosine'
__lowerCamelCase: Optional[Any] = 'cosine_with_restarts'
__lowerCamelCase: Tuple = 'polynomial'
__lowerCamelCase: int = 'constant'
__lowerCamelCase: Optional[Any] = 'constant_with_warmup'
__lowerCamelCase: List[str] = 'piecewise_constant'
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase = -1 ):
"""simple docstring"""
return LambdaLR(_UpperCamelCase , lambda _UpperCamelCase : 1 , last_epoch=_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = -1 ):
"""simple docstring"""
def lr_lambda(_UpperCamelCase ):
if current_step < num_warmup_steps:
return float(_UpperCamelCase ) / float(max(1.0 , _UpperCamelCase ) )
return 1.0
return LambdaLR(_UpperCamelCase , _UpperCamelCase , last_epoch=_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = -1 ):
"""simple docstring"""
lowercase_ : List[Any] = {}
lowercase_ : Dict = step_rules.split("," )
for rule_str in rule_list[:-1]:
lowercase_ , lowercase_ : Any = rule_str.split(":" )
lowercase_ : List[Any] = int(_UpperCamelCase )
lowercase_ : int = float(_UpperCamelCase )
lowercase_ : Optional[int] = value
lowercase_ : Union[str, Any] = float(rule_list[-1] )
def create_rules_function(_UpperCamelCase , _UpperCamelCase ):
def rule_func(_UpperCamelCase ) -> float:
lowercase_ : Optional[Any] = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(_UpperCamelCase ):
if steps < sorted_step:
                    return rules_dict[sorted_step]
return last_lr_multiple
return rule_func
lowercase_ : Optional[int] = create_rules_function(_UpperCamelCase , _UpperCamelCase )
return LambdaLR(_UpperCamelCase , _UpperCamelCase , last_epoch=_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=-1 ):
"""simple docstring"""
def lr_lambda(_UpperCamelCase ):
if current_step < num_warmup_steps:
return float(_UpperCamelCase ) / float(max(1 , _UpperCamelCase ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 0.5 , _UpperCamelCase = -1 ):
"""simple docstring"""
def lr_lambda(_UpperCamelCase ):
if current_step < num_warmup_steps:
return float(_UpperCamelCase ) / float(max(1 , _UpperCamelCase ) )
lowercase_ : List[str] = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(_UpperCamelCase ) * 2.0 * progress )) )
return LambdaLR(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 1 , _UpperCamelCase = -1 ):
"""simple docstring"""
def lr_lambda(_UpperCamelCase ):
if current_step < num_warmup_steps:
return float(_UpperCamelCase ) / float(max(1 , _UpperCamelCase ) )
lowercase_ : Dict = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(_UpperCamelCase ) * progress) % 1.0) )) )
return LambdaLR(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=1e-7 , _UpperCamelCase=1.0 , _UpperCamelCase=-1 ):
"""simple docstring"""
lowercase_ : Dict = optimizer.defaults["lr"]
if not (lr_init > lr_end):
raise ValueError(F"""lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})""" )
def lr_lambda(_UpperCamelCase ):
if current_step < num_warmup_steps:
return float(_UpperCamelCase ) / float(max(1 , _UpperCamelCase ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
lowercase_ : int = lr_init - lr_end
lowercase_ : Optional[int] = num_training_steps - num_warmup_steps
lowercase_ : Optional[Any] = 1 - (current_step - num_warmup_steps) / decay_steps
lowercase_ : List[Any] = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
UpperCamelCase__ = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = 1 , _UpperCamelCase = 1.0 , _UpperCamelCase = -1 , ):
"""simple docstring"""
lowercase_ : Any = SchedulerType(_UpperCamelCase )
lowercase_ : List[Any] = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(_UpperCamelCase , last_epoch=_UpperCamelCase )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(_UpperCamelCase , step_rules=_UpperCamelCase , last_epoch=_UpperCamelCase )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(F"""{name} requires `num_warmup_steps`, please provide that argument.""" )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(_UpperCamelCase , num_warmup_steps=_UpperCamelCase , last_epoch=_UpperCamelCase )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(F"""{name} requires `num_training_steps`, please provide that argument.""" )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
_UpperCamelCase , num_warmup_steps=_UpperCamelCase , num_training_steps=_UpperCamelCase , num_cycles=_UpperCamelCase , last_epoch=_UpperCamelCase , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
_UpperCamelCase , num_warmup_steps=_UpperCamelCase , num_training_steps=_UpperCamelCase , power=_UpperCamelCase , last_epoch=_UpperCamelCase , )
return schedule_func(
_UpperCamelCase , num_warmup_steps=_UpperCamelCase , num_training_steps=_UpperCamelCase , last_epoch=_UpperCamelCase )
| 640
| 0
|
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Tuple = 0
for ch in input_str:
lowercase_ : Dict = ord(_UpperCamelCase )
lowercase_ : Dict = pow(2 , _UpperCamelCase )
        # If the bit for this character's code point is already set, the string has a repeat
if bitmap >> ch_unicode & 1 == 1:
return False
bitmap |= ch_bit_index_on
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 701
|
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase = 200 ):
"""simple docstring"""
lowercase_ : Optional[int] = [1, 2, 5, 10, 20, 50, 100, 200]
lowercase_ : str = [0] * (pence + 1)
lowercase_ : Dict = 1 # base case: 1 way to make 0 pence
for coin in coins:
for i in range(_UpperCamelCase , pence + 1 , 1 ):
number_of_ways[i] += number_of_ways[i - coin]
return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 73682
| 640
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCamelCase__ = {
'configuration_wav2vec2': ['WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Wav2Vec2Config'],
'feature_extraction_wav2vec2': ['Wav2Vec2FeatureExtractor'],
'processing_wav2vec2': ['Wav2Vec2Processor'],
'tokenization_wav2vec2': ['Wav2Vec2CTCTokenizer', 'Wav2Vec2Tokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Wav2Vec2ForAudioFrameClassification',
'Wav2Vec2ForCTC',
'Wav2Vec2ForMaskedLM',
'Wav2Vec2ForPreTraining',
'Wav2Vec2ForSequenceClassification',
'Wav2Vec2ForXVector',
'Wav2Vec2Model',
'Wav2Vec2PreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWav2Vec2ForCTC',
'TFWav2Vec2Model',
'TFWav2Vec2PreTrainedModel',
'TFWav2Vec2ForSequenceClassification',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'FlaxWav2Vec2ForCTC',
'FlaxWav2Vec2ForPreTraining',
'FlaxWav2Vec2Model',
'FlaxWav2Vec2PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 702
|
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _UpperCAmelCase ( snake_case ):
def __init__( self : Tuple , a : NestedDataStructureLike[PathLike] , a : Optional[NamedSplit] = None , a : Optional[Features] = None , a : str = None , a : bool = False , a : bool = False , a : Optional[str] = None , a : Optional[int] = None , **a : List[Any] , ):
'''simple docstring'''
super().__init__(
a , split=a , features=a , cache_dir=a , keep_in_memory=a , streaming=a , num_proc=a , **a , )
lowercase_ : str = field
lowercase_ : Optional[Any] = path_or_paths if isinstance(a , a ) else {self.split: path_or_paths}
lowercase_ : Any = Json(
cache_dir=a , data_files=a , features=a , field=a , **a , )
def lowerCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
if self.streaming:
lowercase_ : Any = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
lowercase_ : Dict = None
lowercase_ : Optional[int] = None
lowercase_ : str = None
lowercase_ : str = None
self.builder.download_and_prepare(
download_config=a , download_mode=a , verification_mode=a , base_path=a , num_proc=self.num_proc , )
lowercase_ : int = self.builder.as_dataset(
split=self.split , verification_mode=a , in_memory=self.keep_in_memory )
return dataset
class _UpperCAmelCase :
def __init__( self : str , a : Dataset , a : Union[PathLike, BinaryIO] , a : Optional[int] = None , a : Optional[int] = None , **a : List[Any] , ):
'''simple docstring'''
if num_proc is not None and num_proc <= 0:
raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""" )
lowercase_ : Dict = dataset
lowercase_ : Optional[int] = path_or_buf
lowercase_ : List[Any] = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
lowercase_ : Optional[Any] = num_proc
lowercase_ : List[Any] = "utf-8"
lowercase_ : List[str] = to_json_kwargs
def lowerCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ : str = self.to_json_kwargs.pop("path_or_buf" , a )
lowercase_ : Any = self.to_json_kwargs.pop("orient" , "records" )
lowercase_ : Any = self.to_json_kwargs.pop("lines" , True if orient == "records" else False )
lowercase_ : List[str] = self.to_json_kwargs.pop("index" , False if orient in ["split", "table"] else True )
lowercase_ : int = self.to_json_kwargs.pop("compression" , a )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(f"""`datasets` currently does not support {compression} compression""" )
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf , "wb" , compression=a ) as buffer:
lowercase_ : Dict = self._write(file_obj=a , orient=a , lines=a , index=a , **self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
f"""The compression parameter is not supported when writing to a buffer, but compression={compression}"""
" was passed. Please provide a local path instead." )
lowercase_ : Dict = self._write(
file_obj=self.path_or_buf , orient=a , lines=a , index=a , **self.to_json_kwargs )
return written
def lowerCAmelCase__ ( self : Optional[int] , a : List[str] ):
'''simple docstring'''
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ : List[Any] = args
lowercase_ : Optional[int] = query_table(
table=self.dataset.data , key=slice(a , offset + self.batch_size ) , indices=self.dataset._indices , )
lowercase_ : Dict = batch.to_pandas().to_json(
path_or_buf=a , orient=a , lines=a , index=a , **a )
if not json_str.endswith("\n" ):
json_str += "\n"
return json_str.encode(self.encoding )
def lowerCAmelCase__ ( self : int , a : BinaryIO , a : int , a : str , a : Union[str, Any] , **a : str , ):
'''simple docstring'''
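        # Stream the Arrow table to JSON in batches; batches are encoded in parallel when num_proc > 1.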
lowercase_ : Union[str, Any] = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating json from Arrow format" , ):
lowercase_ : Dict = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(a )
else:
lowercase_ , lowercase_ : Any = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , a , a )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating json from Arrow format" , ):
written += file_obj.write(a )
return written
| 640
| 0
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
UpperCamelCase__ = None
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {'vocab_file': 'sentencepiece.model', 'tokenizer_file': 'tokenizer.json'}
UpperCamelCase__ = {
'vocab_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
},
'tokenizer_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/tokenizer.json',
},
}
UpperCamelCase__ = {
'google/rembert': 256,
}
UpperCamelCase__ = '▁'
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: Dict = VOCAB_FILES_NAMES
__lowerCamelCase: Dict = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase: List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase: int = RemBertTokenizer
def __init__( self : Dict , a : List[str]=None , a : Optional[int]=None , a : List[Any]=True , a : List[str]=True , a : Tuple=False , a : Optional[int]="[CLS]" , a : List[str]="[SEP]" , a : Dict="<unk>" , a : Tuple="[SEP]" , a : Optional[int]="<pad>" , a : Optional[int]="[CLS]" , a : str="[MASK]" , **a : Union[str, Any] , ):
'''simple docstring'''
lowercase_ : Any = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else mask_token
super().__init__(
a , tokenizer_file=a , do_lower_case=a , remove_space=a , keep_accents=a , bos_token=a , eos_token=a , unk_token=a , sep_token=a , pad_token=a , cls_token=a , mask_token=a , **a , )
lowercase_ : List[str] = do_lower_case
lowercase_ : Union[str, Any] = remove_space
lowercase_ : Union[str, Any] = keep_accents
lowercase_ : Optional[Any] = vocab_file
lowercase_ : str = False if not self.vocab_file else True
def lowerCAmelCase__ ( self : str , a : List[int] , a : Optional[List[int]] = None ):
'''simple docstring'''
lowercase_ : Optional[int] = [self.sep_token_id]
lowercase_ : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCAmelCase__ ( self : Union[str, Any] , a : List[int] , a : Optional[List[int]] = None , a : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(a )) + [1] + ([0] * len(a )) + [1]
return [1] + ([0] * len(a )) + [1]
def lowerCAmelCase__ ( self : Dict , a : List[int] , a : Optional[List[int]] = None ):
'''simple docstring'''
lowercase_ : List[Any] = [self.sep_token_id]
lowercase_ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCAmelCase__ ( self : Tuple , a : str , a : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(a ):
logger.error("Vocabulary path ({}) should be a directory".format(a ) )
return
lowercase_ : Optional[int] = os.path.join(
a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a ):
copyfile(self.vocab_file , a )
return (out_vocab_file,)
| 703
|
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
def update_area_of_max_square(_UpperCamelCase , _UpperCamelCase ) -> int:
# BASE CASE
if row >= rows or col >= cols:
return 0
lowercase_ : List[str] = update_area_of_max_square(_UpperCamelCase , col + 1 )
lowercase_ : List[Any] = update_area_of_max_square(row + 1 , col + 1 )
lowercase_ : Tuple = update_area_of_max_square(row + 1 , _UpperCamelCase )
if mat[row][col]:
lowercase_ : Optional[int] = 1 + min([right, diagonal, down] )
lowercase_ : Any = max(largest_square_area[0] , _UpperCamelCase )
return sub_problem_sol
else:
return 0
lowercase_ : int = [0]
update_area_of_max_square(0 , 0 )
return largest_square_area[0]
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
def update_area_of_max_square_using_dp_array(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> int:
if row >= rows or col >= cols:
return 0
if dp_array[row][col] != -1:
return dp_array[row][col]
lowercase_ : Dict = update_area_of_max_square_using_dp_array(_UpperCamelCase , col + 1 , _UpperCamelCase )
lowercase_ : str = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , _UpperCamelCase )
lowercase_ : Optional[Any] = update_area_of_max_square_using_dp_array(row + 1 , _UpperCamelCase , _UpperCamelCase )
if mat[row][col]:
lowercase_ : Tuple = 1 + min([right, diagonal, down] )
lowercase_ : int = max(largest_square_area[0] , _UpperCamelCase )
lowercase_ : Dict = sub_problem_sol
return sub_problem_sol
else:
return 0
lowercase_ : Any = [0]
lowercase_ : Optional[int] = [[-1] * cols for _ in range(_UpperCamelCase )]
update_area_of_max_square_using_dp_array(0 , 0 , _UpperCamelCase )
return largest_square_area[0]
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : str = [[0] * (cols + 1) for _ in range(rows + 1 )]
lowercase_ : List[Any] = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
lowercase_ : Tuple = dp_array[row][col + 1]
lowercase_ : List[str] = dp_array[row + 1][col + 1]
lowercase_ : List[Any] = dp_array[row + 1][col]
if mat[row][col] == 1:
lowercase_ : Any = 1 + min(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
lowercase_ : Any = max(dp_array[row][col] , _UpperCamelCase )
else:
lowercase_ : int = 0
return largest_square_area
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Optional[int] = [0] * (cols + 1)
lowercase_ : Union[str, Any] = [0] * (cols + 1)
lowercase_ : int = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
lowercase_ : Dict = current_row[col + 1]
lowercase_ : List[Any] = next_row[col + 1]
lowercase_ : Tuple = next_row[col]
if mat[row][col] == 1:
lowercase_ : Dict = 1 + min(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
lowercase_ : int = max(current_row[col] , _UpperCamelCase )
else:
lowercase_ : Tuple = 0
lowercase_ : Optional[Any] = current_row
return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
| 640
| 0
|
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Dict = 0
lowercase_ : Optional[Any] = len(_UpperCamelCase ) # No of vertices in graph
lowercase_ : Union[str, Any] = [0] * n
lowercase_ : Optional[int] = [False] * n
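    # Tarjan-style bridge finding: low[v] is the smallest discovery id reachable
    # from v's DFS subtree; edge (at, to) is a bridge when to's subtree has no
    # back edge to at or above (id_ <= low[to]).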
def dfs(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
lowercase_ : Union[str, Any] = True
lowercase_ : Dict = id_
id_ += 1
for to in graph[at]:
if to == parent:
pass
elif not visited[to]:
dfs(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , id_ )
lowercase_ : str = min(low[at] , low[to] )
if id_ <= low[to]:
bridges.append((at, to) if at < to else (to, at) )
else:
# This edge is a back edge and cannot be a bridge
lowercase_ : Optional[int] = min(low[at] , low[to] )
lowercase_ : list[tuple[int, int]] = []
for i in range(_UpperCamelCase ):
if not visited[i]:
dfs(_UpperCamelCase , -1 , _UpperCamelCase , id_ )
return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
| 704
|
'''simple docstring'''
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
UpperCamelCase__ = namedtuple(
'_TestCommandArgs',
[
'dataset',
'name',
'cache_dir',
'data_dir',
'all_configs',
'save_infos',
'ignore_verifications',
'force_redownload',
'clear_cache',
],
defaults=[None, None, None, False, False, False, False, False],
)
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
return (abs(source - target ) / target) < 0.01
@pytest.mark.integration
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Optional[int] = _TestCommandArgs(dataset=_UpperCamelCase , all_configs=_UpperCamelCase , save_infos=_UpperCamelCase )
lowercase_ : int = TestCommand(*_UpperCamelCase )
test_command.run()
lowercase_ : List[str] = os.path.join(_UpperCamelCase , "README.md" )
assert os.path.exists(_UpperCamelCase )
lowercase_ : Any = DatasetInfosDict.from_directory(_UpperCamelCase )
lowercase_ : Optional[int] = DatasetInfosDict(
{
"default": DatasetInfo(
features=Features(
{
"tokens": Sequence(Value("string" ) ),
"ner_tags": Sequence(
ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] ) ),
"langs": Sequence(Value("string" ) ),
"spans": Sequence(Value("string" ) ),
} ) , splits=[
{
"name": "train",
"num_bytes": 235_1563,
"num_examples": 1_0000,
},
{
"name": "validation",
"num_bytes": 23_8418,
"num_examples": 1000,
},
] , download_size=394_0680 , dataset_size=258_9981 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
lowercase_ , lowercase_ : Optional[int] = getattr(dataset_infos["default"] , _UpperCamelCase ), getattr(expected_dataset_infos["default"] , _UpperCamelCase )
if key == "num_bytes":
assert is_apercent_close(_UpperCamelCase , _UpperCamelCase )
elif key == "splits":
assert list(_UpperCamelCase ) == list(_UpperCamelCase )
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes )
else:
            assert result == expected
| 640
| 0
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ : List[Any] = tempfile.mkdtemp()
# fmt: off
lowercase_ : List[str] = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
lowercase_ : Optional[Any] = dict(zip(a , range(len(a ) ) ) )
lowercase_ : Union[str, Any] = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
lowercase_ : List[Any] = {"unk_token": "<unk>"}
lowercase_ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowercase_ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(a ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(a ) )
lowercase_ : Any = {
"do_resize": True,
"size": 2_0,
"do_center_crop": True,
"crop_size": 1_8,
"do_normalize": True,
"image_mean": [0.4814_5466, 0.457_8275, 0.4082_1073],
"image_std": [0.2686_2954, 0.2613_0258, 0.2757_7711],
}
lowercase_ : List[Any] = os.path.join(self.tmpdirname , a )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(a , a )
def lowerCAmelCase__ ( self : int , **a : Any ):
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token="!" , **a )
def lowerCAmelCase__ ( self : List[Any] , **a : str ):
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token="!" , **a )
def lowerCAmelCase__ ( self : Any , **a : Dict ):
'''simple docstring'''
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **a )
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
lowercase_ : int = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
lowercase_ : Tuple = [Image.fromarray(np.moveaxis(a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
lowercase_ : List[Any] = self.get_tokenizer()
lowercase_ : Dict = self.get_rust_tokenizer()
lowercase_ : str = self.get_image_processor()
lowercase_ : List[str] = OwlViTProcessor(tokenizer=a , image_processor=a )
processor_slow.save_pretrained(self.tmpdirname )
lowercase_ : Optional[Any] = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=a )
lowercase_ : Optional[int] = OwlViTProcessor(tokenizer=a , image_processor=a )
processor_fast.save_pretrained(self.tmpdirname )
lowercase_ : Optional[int] = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , a )
self.assertIsInstance(processor_fast.tokenizer , a )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , a )
self.assertIsInstance(processor_fast.image_processor , a )
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
lowercase_ : List[Any] = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowercase_ : str = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
lowercase_ : Dict = self.get_image_processor(do_normalize=a )
lowercase_ : str = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=a )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , a )
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
lowercase_ : List[Any] = self.get_image_processor()
lowercase_ : Optional[int] = self.get_tokenizer()
lowercase_ : List[str] = OwlViTProcessor(tokenizer=a , image_processor=a )
lowercase_ : Any = self.prepare_image_inputs()
lowercase_ : Any = image_processor(a , return_tensors="np" )
lowercase_ : Optional[Any] = processor(images=a , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowerCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
lowercase_ : Dict = self.get_image_processor()
lowercase_ : str = self.get_tokenizer()
lowercase_ : Optional[Any] = OwlViTProcessor(tokenizer=a , image_processor=a )
lowercase_ : List[Any] = "lower newer"
lowercase_ : Dict = processor(text=a , return_tensors="np" )
lowercase_ : Optional[int] = tokenizer(a , return_tensors="np" )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
lowercase_ : str = self.get_image_processor()
lowercase_ : str = self.get_tokenizer()
lowercase_ : Tuple = OwlViTProcessor(tokenizer=a , image_processor=a )
lowercase_ : Optional[int] = "lower newer"
lowercase_ : Optional[int] = self.prepare_image_inputs()
lowercase_ : int = processor(text=a , images=a )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(a ):
processor()
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ : str = "google/owlvit-base-patch32"
lowercase_ : Optional[int] = OwlViTProcessor.from_pretrained(a )
lowercase_ : Any = ["cat", "nasa badge"]
lowercase_ : List[Any] = processor(text=a )
lowercase_ : Any = 1_6
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(a ):
processor()
def lowerCAmelCase__ ( self : Dict ):
'''simple docstring'''
lowercase_ : int = "google/owlvit-base-patch32"
lowercase_ : Union[str, Any] = OwlViTProcessor.from_pretrained(a )
lowercase_ : Optional[int] = [["cat", "nasa badge"], ["person"]]
lowercase_ : Dict = processor(text=a )
lowercase_ : Tuple = 1_6
lowercase_ : List[Any] = len(a )
lowercase_ : Optional[Any] = max([len(a ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(a ):
processor()
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
lowercase_ : Optional[int] = "google/owlvit-base-patch32"
lowercase_ : int = OwlViTProcessor.from_pretrained(a )
lowercase_ : Tuple = ["cat", "nasa badge"]
lowercase_ : Optional[int] = processor(text=a )
lowercase_ : Dict = 1_6
lowercase_ : int = inputs["input_ids"]
lowercase_ : Union[str, Any] = [
[4_9_4_0_6, 2_3_6_8, 4_9_4_0_7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[4_9_4_0_6, 6_8_4_1, 1_1_3_0_1, 4_9_4_0_7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
lowercase_ : List[str] = self.get_image_processor()
lowercase_ : List[Any] = self.get_tokenizer()
lowercase_ : Optional[Any] = OwlViTProcessor(tokenizer=a , image_processor=a )
lowercase_ : str = self.prepare_image_inputs()
lowercase_ : List[str] = self.prepare_image_inputs()
lowercase_ : Union[str, Any] = processor(images=a , query_images=a )
self.assertListEqual(list(inputs.keys() ) , ["query_pixel_values", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(a ):
processor()
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
lowercase_ : str = self.get_image_processor()
lowercase_ : List[Any] = self.get_tokenizer()
lowercase_ : Any = OwlViTProcessor(tokenizer=a , image_processor=a )
lowercase_ : List[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowercase_ : Union[str, Any] = processor.batch_decode(a )
lowercase_ : List[str] = tokenizer.batch_decode(a )
self.assertListEqual(a , a )
| 705
|
'''simple docstring'''
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
UpperCamelCase__ = ['text', 'image', 'audio']
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : List[Any] = []
for input_type in input_types:
if input_type == "text":
inputs.append("Text input" )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png" ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3000 ) )
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
inputs.append(create_inputs(_UpperCamelCase ) )
else:
raise ValueError(F"""Invalid type requested: {input_type}""" )
return inputs
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Optional[int] = []
for output in outputs:
if isinstance(_UpperCamelCase , (str, AgentText) ):
output_types.append("text" )
elif isinstance(_UpperCamelCase , (Image.Image, AgentImage) ):
output_types.append("image" )
elif isinstance(_UpperCamelCase , (torch.Tensor, AgentAudio) ):
output_types.append("audio" )
else:
raise ValueError(F"""Invalid output: {output}""" )
return output_types
@is_tool_test
class _UpperCAmelCase :
def lowerCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
self.assertTrue(hasattr(self.tool , "inputs" ) )
self.assertTrue(hasattr(self.tool , "outputs" ) )
lowercase_ : Optional[Any] = self.tool.inputs
for _input in inputs:
if isinstance(_input , a ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
lowercase_ : Any = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ : List[str] = create_inputs(self.tool.inputs )
lowercase_ : List[str] = self.tool(*a )
# There is a single output
if len(self.tool.outputs ) == 1:
lowercase_ : Union[str, Any] = [outputs]
self.assertListEqual(output_types(a ) , self.tool.outputs )
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
self.assertTrue(hasattr(self.tool , "description" ) )
self.assertTrue(hasattr(self.tool , "default_checkpoint" ) )
self.assertTrue(self.tool.description.startswith("This is a tool that" ) )
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ : Any = create_inputs(self.tool.inputs )
lowercase_ : str = self.tool(*a )
if not isinstance(a , a ):
lowercase_ : List[Any] = [outputs]
self.assertEqual(len(a ) , len(self.tool.outputs ) )
for output, output_type in zip(a , self.tool.outputs ):
lowercase_ : int = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(a , a ) )
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
lowercase_ : Dict = create_inputs(self.tool.inputs )
lowercase_ : Optional[int] = []
for _input, input_type in zip(a , self.tool.inputs ):
if isinstance(a , a ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
lowercase_ : Any = self.tool(*a )
if not isinstance(a , a ):
lowercase_ : Any = [outputs]
self.assertEqual(len(a ) , len(self.tool.outputs ) )
| 640
| 0
|
'''simple docstring'''
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
UpperCamelCase__ = HfApi()
UpperCamelCase__ = {}
# fmt: off
UpperCamelCase__ = torch.tensor([
-0.75_15, -1.68_83, 0.24_20, 0.03_00, 0.63_47, 1.34_33, -1.17_43, -3.74_67,
1.23_42, -2.24_85, 0.46_36, 0.80_76, -0.79_91, 0.39_69, 0.84_98, 0.91_89,
-1.88_87, -3.35_22, 0.76_39, 0.20_40, 0.62_71, -2.71_48, -1.63_16, 3.08_39,
0.31_86, 0.27_21, -0.97_59, -1.24_61, 2.62_57, 1.35_57
])
UpperCamelCase__ = torch.tensor([
-2.36_39, -2.53_44, 0.00_54, -0.66_74, 1.59_90, 1.01_58, 0.31_24, -2.14_36,
1.87_95, -2.54_29, -0.15_66, -0.39_73, 1.24_90, 2.64_47, 1.22_83, -0.52_08,
-2.81_54, -3.51_19, 2.38_38, 1.20_33, 1.72_01, -2.12_56, -1.45_76, 2.79_48,
2.42_04, -0.97_52, -1.25_46, 0.80_27, 3.27_58, 3.13_65
])
UpperCamelCase__ = torch.tensor([
-0.65_31, -0.68_91, -0.31_72, -0.53_75, -0.91_40, -0.53_67, -0.11_75, -0.78_69,
-0.38_08, -0.45_13, -0.20_98, -0.00_83, 0.31_83, 0.51_40, 0.22_47, -0.13_04,
-0.13_02, -0.28_02, -0.20_84, -0.20_25, -0.49_67, -0.48_73, -0.08_61, 0.69_25,
0.02_50, 0.12_90, -0.15_43, 0.63_16, 1.04_60, 1.49_43
])
UpperCamelCase__ = torch.tensor([
0.09_11, 0.11_07, 0.01_82, 0.04_35, -0.08_05, -0.06_08, 0.03_81, 0.21_72,
-0.02_80, 0.13_27, -0.02_99, -0.02_55, -0.00_50, -0.11_70, -0.10_46, 0.03_09,
0.13_67, 0.17_28, -0.05_33, -0.07_48, -0.05_34, 0.16_24, 0.03_84, -0.18_05,
-0.07_07, 0.06_42, 0.02_20, -0.01_34, -0.13_33, -0.15_05
])
UpperCamelCase__ = torch.tensor([
0.13_21, 0.13_37, 0.04_40, 0.06_22, -0.05_91, -0.03_70, 0.05_03, 0.21_33,
-0.01_77, 0.14_15, -0.01_16, -0.01_12, 0.00_44, -0.09_80, -0.07_89, 0.03_95,
0.15_02, 0.17_85, -0.04_88, -0.05_14, -0.04_04, 0.15_39, 0.04_54, -0.15_59,
-0.06_65, 0.06_59, 0.03_83, -0.00_05, -0.12_66, -0.13_86
])
UpperCamelCase__ = torch.tensor([
0.11_54, 0.12_18, 0.03_07, 0.05_26, -0.07_11, -0.05_41, 0.03_66, 0.20_78,
-0.02_67, 0.13_17, -0.02_26, -0.01_93, -0.00_14, -0.10_55, -0.09_02, 0.03_30,
0.13_91, 0.17_09, -0.05_62, -0.06_93, -0.05_60, 0.14_82, 0.03_81, -0.16_83,
-0.06_81, 0.06_61, 0.03_31, -0.00_46, -0.12_68, -0.14_31
])
UpperCamelCase__ = torch.tensor([
0.11_92, 0.12_40, 0.04_14, 0.06_06, -0.05_57, -0.04_12, 0.04_30, 0.20_42,
-0.02_00, 0.13_85, -0.01_15, -0.01_32, 0.00_17, -0.09_65, -0.08_02, 0.03_98,
0.14_33, 0.17_47, -0.04_58, -0.05_33, -0.04_07, 0.15_45, 0.04_19, -0.15_74,
-0.06_45, 0.06_26, 0.03_41, -0.00_10, -0.11_99, -0.13_90
])
UpperCamelCase__ = torch.tensor([
0.10_75, 0.10_74, 0.02_05, 0.04_31, -0.07_74, -0.06_07, 0.02_98, 0.20_42,
-0.03_20, 0.12_67, -0.02_81, -0.02_50, -0.00_64, -0.10_91, -0.09_46, 0.02_90,
0.13_28, 0.16_50, -0.05_80, -0.07_38, -0.05_86, 0.14_40, 0.03_37, -0.17_46,
-0.07_12, 0.06_05, 0.02_50, -0.00_99, -0.13_16, -0.14_73
])
UpperCamelCase__ = torch.tensor([
-1.45_72, -2.04_81, -0.04_14, -0.60_05, 1.41_36, 0.58_48, 0.40_28, -2.73_30,
1.22_12, -2.12_28, 0.21_55, 0.40_39, 0.76_62, 2.05_35, 0.74_77, -0.32_43,
-2.17_58, -2.76_48, 1.69_47, 0.70_26, 1.23_38, -1.60_78, -0.86_82, 2.28_10,
1.85_74, -0.57_18, -0.55_86, -0.01_86, 2.34_15, 2.12_51])
UpperCamelCase__ = torch.tensor([
-1.36_90, -1.97_20, -0.40_90, -0.69_66, 1.46_60, 0.99_38, -0.13_85, -2.73_24,
0.77_36, -1.89_17, 0.29_23, 0.42_93, 0.16_93, 1.41_12, 1.18_87, -0.31_81,
-2.21_60, -2.63_81, 1.31_70, 0.81_63, 0.92_40, -1.65_44, -0.60_99, 2.52_59,
1.64_30, -0.90_90, -0.93_92, -0.01_26, 2.42_68, 2.32_66
])
UpperCamelCase__ = torch.tensor([
-1.35_25, -1.96_28, -0.39_56, -0.68_60, 1.46_64, 1.00_14, -0.12_59, -2.72_12,
0.77_72, -1.88_11, 0.29_96, 0.43_88, 0.17_04, 1.40_29, 1.17_01, -0.30_27,
-2.20_53, -2.62_87, 1.33_50, 0.81_31, 0.92_74, -1.62_92, -0.60_98, 2.51_31,
1.65_05, -0.89_58, -0.92_98, -0.01_51, 2.42_57, 2.33_55
])
UpperCamelCase__ = torch.tensor([
-2.05_85, -2.78_97, -0.28_50, -0.89_40, 1.90_52, 0.57_02, 0.63_45, -3.89_59,
1.59_32, -3.23_19, 0.19_74, 0.02_87, 1.75_66, 2.65_43, 0.83_87, -0.53_51,
-3.27_36, -4.33_75, 2.90_29, 1.63_90, 1.46_40, -2.17_01, -1.90_13, 2.93_41,
3.49_81, -0.62_55, -1.16_44, -0.15_91, 3.70_97, 3.20_66
])
UpperCamelCase__ = torch.tensor([
-2.31_39, -2.55_94, -0.01_97, -0.67_85, 1.70_01, 1.16_06, 0.30_75, -2.17_40,
1.80_71, -2.56_30, -0.09_26, -0.38_11, 1.21_16, 2.62_46, 1.27_31, -0.53_98,
-2.81_53, -3.61_40, 2.38_93, 1.32_62, 1.62_58, -2.18_56, -1.32_67, 2.83_95,
2.37_79, -1.06_23, -1.24_68, 0.89_59, 3.33_67, 3.22_43
])
UpperCamelCase__ = torch.tensor([
-2.06_28, -2.76_67, -0.20_89, -0.82_63, 2.05_39, 0.59_92, 0.64_95, -3.83_36,
1.60_25, -3.28_17, 0.17_21, -0.06_33, 1.75_16, 2.70_39, 0.81_00, -0.59_08,
-3.21_13, -4.43_43, 2.92_57, 1.36_32, 1.55_62, -2.14_89, -1.98_94, 3.05_60,
3.33_96, -0.73_28, -1.04_17, 0.03_83, 3.70_93, 3.23_43
])
UpperCamelCase__ = torch.tensor([
-1.45_74, -2.05_69, -0.04_73, -0.61_17, 1.40_18, 0.57_69, 0.41_29, -2.73_44,
1.22_41, -2.13_97, 0.20_00, 0.39_37, 0.76_16, 2.04_53, 0.73_24, -0.33_91,
-2.17_46, -2.77_44, 1.69_63, 0.69_21, 1.21_87, -1.61_72, -0.88_77, 2.24_39,
1.84_71, -0.58_39, -0.56_05, -0.04_64, 2.32_50, 2.12_19
])
# fmt: on
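# For every google/CompVis diffusers checkpoint, denoise a fixed random input at
# timestep 10 and compare the first 30 output logits against the tensors above.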
UpperCamelCase__ = api.list_models(filter='diffusers')
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
UpperCamelCase__ = '/home/patrick/google_checkpoints/' + mod.modelId.split('/')[-1]
print(f"""Started running {mod.modelId}!!!""")
if mod.modelId.startswith('CompVis'):
UpperCamelCase__ = UNetaDModel.from_pretrained(local_checkpoint, subfolder='unet')
else:
UpperCamelCase__ = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
UpperCamelCase__ = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
UpperCamelCase__ = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
UpperCamelCase__ = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results['_'.join('_'.join(mod.modelId.split('/')).split('-'))], atol=1e-3
)
print(f"""{mod.modelId} has passed successfully!!!""")
| 706
|
'''simple docstring'''
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
UpperCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCamelCase__ = '\n Examples:\n ```py\n >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> negative_image_emb = out.negative_image_embeds\n\n >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")\n >>> pipe.to("cuda")\n\n >>> image = pipe(\n ... prompt,\n ... image_embeds=image_emb,\n ... negative_image_embeds=negative_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... ).images\n\n >>> image[0].save("cat.png")\n ```\n'
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=8 ):
"""simple docstring"""
lowercase_ : int = h // scale_factor**2
if h % scale_factor**2 != 0:
new_h += 1
lowercase_ : str = w // scale_factor**2
if w % scale_factor**2 != 0:
new_w += 1
return new_h * scale_factor, new_w * scale_factor
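# A quick, runnable check of the rounding above (a sketch using this file's
# names): the height is divided by scale_factor**2, rounded up, then
# re-multiplied by scale_factor, so 512 -> 64 and 513 -> 72 for the default
# scale_factor of 8.
import math

def _expected_new_h(h: int, scale_factor: int = 8) -> int:
    return math.ceil(h / scale_factor**2) * scale_factor

assert _expected_new_h(512) == 64
assert _expected_new_h(513) == 72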
class _UpperCAmelCase ( snake_case ):
def __init__( self : int , a : MultilingualCLIP , a : XLMRobertaTokenizer , a : UNetaDConditionModel , a : Union[DDIMScheduler, DDPMScheduler] , a : VQModel , ):
'''simple docstring'''
super().__init__()
self.register_modules(
text_encoder=a , tokenizer=a , unet=a , scheduler=a , movq=a , )
lowercase_ : Dict = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowerCAmelCase__ ( self : List[Any] , a : Tuple , a : List[str] , a : Optional[Any] , a : str , a : Tuple , a : List[str] ):
'''simple docstring'''
if latents is None:
lowercase_ : List[str] = randn_tensor(a , generator=a , device=a , dtype=a )
else:
if latents.shape != shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
lowercase_ : Optional[int] = latents.to(a )
lowercase_ : str = latents * scheduler.init_noise_sigma
return latents
def lowerCAmelCase__ ( self : Optional[Any] , a : List[str] , a : List[Any] , a : Union[str, Any] , a : str , a : Tuple=None , ):
'''simple docstring'''
lowercase_ : Tuple = len(a ) if isinstance(a , a ) else 1
# get prompt text embeddings
lowercase_ : Any = self.tokenizer(
a , padding="max_length" , truncation=a , max_length=7_7 , return_attention_mask=a , add_special_tokens=a , return_tensors="pt" , )
lowercase_ : Union[str, Any] = text_inputs.input_ids
lowercase_ : Tuple = self.tokenizer(a , padding="longest" , return_tensors="pt" ).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(a , a ):
lowercase_ : Optional[int] = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
lowercase_ : List[str] = text_input_ids.to(a )
lowercase_ : int = text_inputs.attention_mask.to(a )
lowercase_ , lowercase_ : Optional[int] = self.text_encoder(
input_ids=a , attention_mask=a )
lowercase_ : str = prompt_embeds.repeat_interleave(a , dim=0 )
lowercase_ : int = text_encoder_hidden_states.repeat_interleave(a , dim=0 )
lowercase_ : int = text_mask.repeat_interleave(a , dim=0 )
if do_classifier_free_guidance:
lowercase_ : List[str]
if negative_prompt is None:
lowercase_ : int = [""] * batch_size
elif type(a ) is not type(a ):
raise TypeError(
f"""`negative_prompt` should be the same type to `prompt`, but got {type(a )} !="""
f""" {type(a )}.""" )
elif isinstance(a , a ):
lowercase_ : Tuple = [negative_prompt]
elif batch_size != len(a ):
raise ValueError(
f"""`negative_prompt`: {negative_prompt} has batch size {len(a )}, but `prompt`:"""
f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
" the batch size of `prompt`." )
else:
lowercase_ : Dict = negative_prompt
lowercase_ : str = self.tokenizer(
a , padding="max_length" , max_length=7_7 , truncation=a , return_attention_mask=a , add_special_tokens=a , return_tensors="pt" , )
lowercase_ : List[Any] = uncond_input.input_ids.to(a )
lowercase_ : Optional[int] = uncond_input.attention_mask.to(a )
lowercase_ , lowercase_ : int = self.text_encoder(
input_ids=a , attention_mask=a )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowercase_ : List[str] = negative_prompt_embeds.shape[1]
lowercase_ : Dict = negative_prompt_embeds.repeat(1 , a )
lowercase_ : Optional[Any] = negative_prompt_embeds.view(batch_size * num_images_per_prompt , a )
lowercase_ : Any = uncond_text_encoder_hidden_states.shape[1]
lowercase_ : List[Any] = uncond_text_encoder_hidden_states.repeat(1 , a , 1 )
lowercase_ : Tuple = uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt , a , -1 )
lowercase_ : List[Any] = uncond_text_mask.repeat_interleave(a , dim=0 )
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowercase_ : Optional[int] = torch.cat([negative_prompt_embeds, prompt_embeds] )
lowercase_ : Tuple = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
lowercase_ : Any = torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
def lowerCAmelCase__ ( self : Tuple , a : Optional[Any]=0 ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowercase_ : List[str] = torch.device(f"""cuda:{gpu_id}""" )
lowercase_ : str = [
self.unet,
self.text_encoder,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(a , a )
def lowerCAmelCase__ ( self : Union[str, Any] , a : List[str]=0 ):
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
lowercase_ : List[str] = torch.device(f"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=a )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowercase_ : List[str] = None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
lowercase_ , lowercase_ : Optional[int] = cpu_offload_with_hook(a , a , prev_module_hook=a )
if self.safety_checker is not None:
lowercase_ , lowercase_ : Optional[int] = cpu_offload_with_hook(self.safety_checker , a , prev_module_hook=a )
# We'll offload the last model manually.
lowercase_ : Dict = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(a , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(a )
def __call__( self : Tuple , a : Union[str, List[str]] , a : Union[torch.FloatTensor, List[torch.FloatTensor]] , a : Union[torch.FloatTensor, List[torch.FloatTensor]] , a : Optional[Union[str, List[str]]] = None , a : int = 5_1_2 , a : int = 5_1_2 , a : int = 1_0_0 , a : float = 4.0 , a : int = 1 , a : Optional[Union[torch.Generator, List[torch.Generator]]] = None , a : Optional[torch.FloatTensor] = None , a : Optional[str] = "pil" , a : bool = True , ):
'''simple docstring'''
if isinstance(a , a ):
lowercase_ : List[str] = 1
elif isinstance(a , a ):
lowercase_ : int = len(a )
else:
raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(a )}""" )
lowercase_ : Tuple = self._execution_device
lowercase_ : Dict = batch_size * num_images_per_prompt
lowercase_ : Dict = guidance_scale > 1.0
lowercase_ , lowercase_ , lowercase_ : List[str] = self._encode_prompt(
a , a , a , a , a )
if isinstance(a , a ):
lowercase_ : Optional[int] = torch.cat(a , dim=0 )
if isinstance(a , a ):
lowercase_ : int = torch.cat(a , dim=0 )
if do_classifier_free_guidance:
lowercase_ : Optional[int] = image_embeds.repeat_interleave(a , dim=0 )
lowercase_ : int = negative_image_embeds.repeat_interleave(a , dim=0 )
lowercase_ : str = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(
dtype=prompt_embeds.dtype , device=a )
self.scheduler.set_timesteps(a , device=a )
lowercase_ : List[str] = self.scheduler.timesteps
lowercase_ : str = self.unet.config.in_channels
lowercase_ , lowercase_ : int = get_new_h_w(a , a , self.movq_scale_factor )
# create initial latent
lowercase_ : str = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , a , a , a , self.scheduler , )
for i, t in enumerate(self.progress_bar(a ) ):
# expand the latents if we are doing classifier free guidance
lowercase_ : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase_ : Optional[int] = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
lowercase_ : Optional[int] = self.unet(
sample=a , timestep=a , encoder_hidden_states=a , added_cond_kwargs=a , return_dict=a , )[0]
if do_classifier_free_guidance:
lowercase_ , lowercase_ : Union[str, Any] = noise_pred.split(latents.shape[1] , dim=1 )
lowercase_ , lowercase_ : Optional[Any] = noise_pred.chunk(2 )
lowercase_ , lowercase_ : Any = variance_pred.chunk(2 )
lowercase_ : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowercase_ : int = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowercase_ , lowercase_ : str = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowercase_ : Tuple = self.scheduler.step(
a , a , a , generator=a , ).prev_sample
# post-processing
lowercase_ : Union[str, Any] = self.movq.decode(a , force_not_quantize=a )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
lowercase_ : List[Any] = image * 0.5 + 0.5
lowercase_ : Optional[int] = image.clamp(0 , 1 )
lowercase_ : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowercase_ : List[str] = self.numpy_to_pil(a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a )
| 640
| 0
|
'''simple docstring'''
from collections.abc import Generator
def __SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
lowercase_ , lowercase_ : Optional[Any] = 0, 1
while True:
lowercase_ , lowercase_ : Tuple = b, a + b
yield b
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase = 1000 ):
"""simple docstring"""
lowercase_ : Optional[Any] = 1
lowercase_ : Optional[int] = fibonacci_generator()
while len(str(next(_UpperCamelCase ) ) ) < n:
answer += 1
return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
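# A de-obfuscated, runnable sketch of the logic above, assuming standard
# names: count Fibonacci terms until one reaches n decimal digits.
def first_fib_with_n_digits(n: int = 1000) -> int:
    a, b, index = 0, 1, 1
    while len(str(b)) < n:
        a, b = b, a + b
        index += 1
    return index

assert first_fib_with_n_digits(3) == 12  # F(12) = 144 is the first 3-digit term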
| 707
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
UpperCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCamelCase__ = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n'
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=8 ):
"""simple docstring"""
lowercase_ : Union[str, Any] = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
lowercase_ : Union[str, Any] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase=512 , _UpperCamelCase=512 ):
"""simple docstring"""
lowercase_ : Union[str, Any] = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
lowercase_ : str = np.array(pil_image.convert("RGB" ) )
lowercase_ : Optional[int] = arr.astype(np.floataa ) / 127.5 - 1
lowercase_ : int = np.transpose(_UpperCamelCase , [2, 0, 1] )
lowercase_ : str = torch.from_numpy(_UpperCamelCase ).unsqueeze(0 )
return image
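# A small, runnable check of the normalization above: uint8 pixel values are
# mapped via x / 127.5 - 1, presumably the [-1, 1] input range the movq
# encoder expects.
import numpy as np

pixels = np.array([0.0, 127.5, 255.0], dtype=np.float32) / 127.5 - 1
assert np.allclose(pixels, [-1.0, 0.0, 1.0])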
class _UpperCAmelCase ( snake_case ):
def __init__( self : List[Any] , a : UNetaDConditionModel , a : DDPMScheduler , a : VQModel , ):
'''simple docstring'''
super().__init__()
self.register_modules(
unet=a , scheduler=a , movq=a , )
lowercase_ : Dict = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowerCAmelCase__ ( self : Union[str, Any] , a : Tuple , a : List[str] , a : List[Any] ):
'''simple docstring'''
lowercase_ : Dict = min(int(num_inference_steps * strength ) , a )
lowercase_ : str = max(num_inference_steps - init_timestep , 0 )
lowercase_ : Tuple = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def lowerCAmelCase__ ( self : Union[str, Any] , a : int , a : List[Any] , a : Tuple , a : Union[str, Any] , a : int , a : Tuple , a : Optional[Any]=None ):
'''simple docstring'''
if not isinstance(a , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(a )}""" )
lowercase_ : str = image.to(device=a , dtype=a )
lowercase_ : Any = batch_size * num_images_per_prompt
if image.shape[1] == 4:
lowercase_ : str = image
else:
if isinstance(a , a ) and len(a ) != batch_size:
raise ValueError(
f"""You have passed a list of generators of length {len(a )}, but requested an effective batch"""
f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
elif isinstance(a , a ):
lowercase_ : str = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(a )
]
lowercase_ : List[Any] = torch.cat(a , dim=0 )
else:
lowercase_ : Tuple = self.movq.encode(a ).latent_dist.sample(a )
lowercase_ : Union[str, Any] = self.movq.config.scaling_factor * init_latents
lowercase_ : Tuple = torch.cat([init_latents] , dim=0 )
lowercase_ : List[Any] = init_latents.shape
lowercase_ : Union[str, Any] = randn_tensor(a , generator=a , device=a , dtype=a )
# get latents
lowercase_ : Dict = self.scheduler.add_noise(a , a , a )
lowercase_ : Tuple = init_latents
return latents
def lowerCAmelCase__ ( self : List[Any] , a : str=0 ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowercase_ : Optional[Any] = torch.device(f"""cuda:{gpu_id}""" )
lowercase_ : Optional[Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(a , a )
def lowerCAmelCase__ ( self : Optional[int] , a : Union[str, Any]=0 ):
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
lowercase_ : Any = torch.device(f"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=a )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowercase_ : Optional[int] = None
for cpu_offloaded_model in [self.unet, self.movq]:
lowercase_ , lowercase_ : Union[str, Any] = cpu_offload_with_hook(a , a , prev_module_hook=a )
# We'll offload the last model manually.
lowercase_ : List[Any] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(a , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(a )
def __call__( self : Optional[int] , a : Union[torch.FloatTensor, List[torch.FloatTensor]] , a : Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] , a : Union[torch.FloatTensor, List[torch.FloatTensor]] , a : int = 5_1_2 , a : int = 5_1_2 , a : int = 1_0_0 , a : float = 4.0 , a : float = 0.3 , a : int = 1 , a : Optional[Union[torch.Generator, List[torch.Generator]]] = None , a : Optional[str] = "pil" , a : bool = True , ):
'''simple docstring'''
lowercase_ : Optional[int] = self._execution_device
lowercase_ : Dict = guidance_scale > 1.0
if isinstance(a , a ):
lowercase_ : Dict = torch.cat(a , dim=0 )
lowercase_ : Dict = image_embeds.shape[0]
if isinstance(a , a ):
lowercase_ : str = torch.cat(a , dim=0 )
if do_classifier_free_guidance:
lowercase_ : Optional[Any] = image_embeds.repeat_interleave(a , dim=0 )
lowercase_ : int = negative_image_embeds.repeat_interleave(a , dim=0 )
lowercase_ : int = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=a )
if not isinstance(a , a ):
lowercase_ : List[Any] = [image]
if not all(isinstance(a , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
f"""Input is in incorrect format: {[type(a ) for i in image]}. Currently, we only support PIL image and pytorch tensor""" )
lowercase_ : List[Any] = torch.cat([prepare_image(a , a , a ) for i in image] , dim=0 )
lowercase_ : List[Any] = image.to(dtype=image_embeds.dtype , device=a )
lowercase_ : Optional[int] = self.movq.encode(a )["latents"]
lowercase_ : Dict = latents.repeat_interleave(a , dim=0 )
self.scheduler.set_timesteps(a , device=a )
lowercase_ , lowercase_ : List[Any] = self.get_timesteps(a , a , a )
lowercase_ : Tuple = timesteps[:1].repeat(batch_size * num_images_per_prompt )
lowercase_ , lowercase_ : Optional[Any] = downscale_height_and_width(a , a , self.movq_scale_factor )
lowercase_ : Tuple = self.prepare_latents(
a , a , a , a , image_embeds.dtype , a , a )
for i, t in enumerate(self.progress_bar(a ) ):
# expand the latents if we are doing classifier free guidance
lowercase_ : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase_ : int = {"image_embeds": image_embeds}
lowercase_ : Optional[int] = self.unet(
sample=a , timestep=a , encoder_hidden_states=a , added_cond_kwargs=a , return_dict=a , )[0]
if do_classifier_free_guidance:
lowercase_ , lowercase_ : List[Any] = noise_pred.split(latents.shape[1] , dim=1 )
lowercase_ , lowercase_ : int = noise_pred.chunk(2 )
lowercase_ , lowercase_ : Any = variance_pred.chunk(2 )
lowercase_ : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowercase_ : Optional[Any] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowercase_ , lowercase_ : List[Any] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowercase_ : Dict = self.scheduler.step(
a , a , a , generator=a , )[0]
# post-processing
lowercase_ : Optional[Any] = self.movq.decode(a , force_not_quantize=a )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
lowercase_ : Tuple = image * 0.5 + 0.5
lowercase_ : Any = image.clamp(0 , 1 )
lowercase_ : Any = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowercase_ : Tuple = self.numpy_to_pil(a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a )
| 640
| 0
|
'''simple docstring'''
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class _UpperCAmelCase ( unittest.TestCase ):
def lowerCAmelCase__ ( self : Dict ):
'''simple docstring'''
lowercase_ : Tuple = torch.nn.Linear(1_0 , 1_0 )
lowercase_ : Union[str, Any] = torch.optim.SGD(model.parameters() , 0.1 )
lowercase_ : Optional[int] = Accelerator()
lowercase_ : Optional[int] = accelerator.prepare(a )
try:
pickle.loads(pickle.dumps(a ) )
except Exception as e:
self.fail(f"""Accelerated optimizer pickling failed with {e}""" )
AcceleratorState._reset_state()
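# A plain-torch illustration of the same round-trip (no Accelerator needed):
# a torch optimizer, including its hyperparameters, survives pickling.
import pickle
import torch

model = torch.nn.Linear(10, 10)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
restored = pickle.loads(pickle.dumps(optimizer))
assert restored.defaults["lr"] == 0.1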
| 708
|
'''simple docstring'''
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: str = ['image_processor', 'tokenizer']
__lowerCamelCase: Dict = 'Pix2StructImageProcessor'
__lowerCamelCase: Union[str, Any] = ('T5Tokenizer', 'T5TokenizerFast')
def __init__( self : str , a : Dict , a : List[str] ):
'''simple docstring'''
lowercase_ : Optional[Any] = False
super().__init__(a , a )
def __call__( self : Tuple , a : int=None , a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , a : bool = True , a : Union[bool, str, PaddingStrategy] = False , a : Union[bool, str, TruncationStrategy] = None , a : Optional[int] = None , a : Optional[int] = 2_0_4_8 , a : int = 0 , a : Optional[int] = None , a : Optional[bool] = None , a : bool = False , a : bool = False , a : bool = False , a : bool = False , a : bool = False , a : bool = True , a : Optional[Union[str, TensorType]] = None , **a : List[str] , ):
'''simple docstring'''
if images is None and text is None:
raise ValueError("You have to specify either images or text." )
# Get only text
if images is None and not self.image_processor.is_vqa:
lowercase_ : Dict = self.tokenizer
lowercase_ : Tuple = self.tokenizer(
text=a , add_special_tokens=a , padding=a , truncation=a , max_length=a , stride=a , pad_to_multiple_of=a , return_attention_mask=a , return_overflowing_tokens=a , return_special_tokens_mask=a , return_offsets_mapping=a , return_token_type_ids=a , return_length=a , verbose=a , return_tensors=a , **a , )
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
lowercase_ : Optional[int] = self.image_processor(
a , return_tensors=a , max_patches=a , **a )
else:
# add pixel_values and bbox
lowercase_ : Any = self.image_processor(
a , return_tensors=a , max_patches=a , header_text=a , **a )
if text is not None and not self.image_processor.is_vqa:
lowercase_ : int = self.tokenizer(
text=a , add_special_tokens=a , padding=a , truncation=a , max_length=a , stride=a , pad_to_multiple_of=a , return_attention_mask=a , return_overflowing_tokens=a , return_special_tokens_mask=a , return_offsets_mapping=a , return_token_type_ids=a , return_length=a , verbose=a , return_tensors=a , **a , )
if "attention_mask" in text_encoding:
lowercase_ : str = text_encoding.pop("attention_mask" )
if "input_ids" in text_encoding:
lowercase_ : Dict = text_encoding.pop("input_ids" )
else:
lowercase_ : str = None
if text_encoding is not None:
encoding_image_processor.update(a )
return encoding_image_processor
def lowerCAmelCase__ ( self : Any , *a : str , **a : Tuple ):
'''simple docstring'''
return self.tokenizer.batch_decode(*a , **a )
def lowerCAmelCase__ ( self : str , *a : Optional[int] , **a : Any ):
'''simple docstring'''
return self.tokenizer.decode(*a , **a )
@property
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
lowercase_ : Tuple = self.tokenizer.model_input_names
lowercase_ : Dict = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
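# A hedged usage sketch (the checkpoint name is illustrative, not verified
# here): the processor routes images to the image processor and text to the
# T5 tokenizer, merging both into a single encoding.
#
# from transformers import Pix2StructProcessor
# processor = Pix2StructProcessor.from_pretrained("google/pix2struct-base")
# inputs = processor(images=image, text="A short caption", return_tensors="pt")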
| 640
| 0
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Generator
import requests
from bsa import BeautifulSoup
UpperCamelCase__ = 'https://www.indeed.co.in/jobs?q=mobile+app+development&l='
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase = "mumbai" ):
"""simple docstring"""
lowercase_ : int = BeautifulSoup(requests.get(url + location ).content , "html.parser" )
# Each matching div holds the details of a single job posting
for job in soup.find_all("div" , attrs={"data-tn-component": "organicJob"} ):
lowercase_ : Optional[int] = job.find("a" , attrs={"data-tn-element": "jobTitle"} ).text.strip()
lowercase_ : Dict = job.find("span" , {"class": "company"} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('Bangalore'), 1):
print(f"""Job {i:>2} is {job[0]} at {job[1]}""")
| 709
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _UpperCAmelCase ( snake_case , unittest.TestCase ):
__lowerCamelCase: Dict = KandinskyVaaPriorPipeline
__lowerCamelCase: Optional[int] = ['prompt']
__lowerCamelCase: Any = ['prompt', 'negative_prompt']
__lowerCamelCase: List[Any] = [
'num_images_per_prompt',
'generator',
'num_inference_steps',
'latents',
'negative_prompt',
'guidance_scale',
'output_type',
'return_dict',
]
__lowerCamelCase: List[Any] = False
@property
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
return 3_2
@property
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
return 3_2
@property
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
return self.time_input_dim
@property
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def lowerCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
return 1_0_0
@property
def lowerCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
lowercase_ : Any = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
return tokenizer
@property
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModelWithProjection(a )
@property
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ : List[str] = {
"num_attention_heads": 2,
"attention_head_dim": 1_2,
"embedding_dim": self.text_embedder_hidden_size,
"num_layers": 1,
}
lowercase_ : Union[str, Any] = PriorTransformer(**a )
# clip_std and clip_mean are initialized to 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to 1 so it won't return 0
lowercase_ : List[Any] = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ : Dict = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=2_2_4 , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1_4 , )
lowercase_ : Optional[Any] = CLIPVisionModelWithProjection(a )
return model
@property
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
lowercase_ : List[str] = CLIPImageProcessor(
crop_size=2_2_4 , do_center_crop=a , do_normalize=a , do_resize=a , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=2_2_4 , )
return image_processor
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
lowercase_ : Any = self.dummy_prior
lowercase_ : Optional[Any] = self.dummy_image_encoder
lowercase_ : List[Any] = self.dummy_text_encoder
lowercase_ : Any = self.dummy_tokenizer
lowercase_ : Optional[Any] = self.dummy_image_processor
lowercase_ : List[str] = UnCLIPScheduler(
variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1_0_0_0 , clip_sample=a , clip_sample_range=10.0 , )
lowercase_ : List[Any] = {
"prior": prior,
"image_encoder": image_encoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"scheduler": scheduler,
"image_processor": image_processor,
}
return components
def lowerCAmelCase__ ( self : Any , a : Dict , a : Dict=0 ):
'''simple docstring'''
if str(a ).startswith("mps" ):
lowercase_ : int = torch.manual_seed(a )
else:
lowercase_ : Optional[Any] = torch.Generator(device=a ).manual_seed(a )
lowercase_ : Any = {
"prompt": "horse",
"generator": generator,
"guidance_scale": 4.0,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
def lowerCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ : str = "cpu"
lowercase_ : Any = self.get_dummy_components()
lowercase_ : int = self.pipeline_class(**a )
lowercase_ : Any = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowercase_ : Any = pipe(**self.get_dummy_inputs(a ) )
lowercase_ : List[Any] = output.image_embeds
lowercase_ : str = pipe(
**self.get_dummy_inputs(a ) , return_dict=a , )[0]
lowercase_ : Any = image[0, -1_0:]
lowercase_ : Dict = image_from_tuple[0, -1_0:]
assert image.shape == (1, 3_2)
lowercase_ : int = np.array(
[-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ : int = torch_device == "cpu"
lowercase_ : Tuple = True
lowercase_ : str = False
self._test_inference_batch_single_identical(
test_max_difference=a , relax_max_difference=a , test_mean_pixel_difference=a , )
@skip_mps
def lowerCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ : Any = torch_device == "cpu"
lowercase_ : int = False
self._test_attention_slicing_forward_pass(
test_max_difference=a , test_mean_pixel_difference=a , )
| 640
| 0
|
'''simple docstring'''
from __future__ import annotations
import math
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
if len(_UpperCamelCase ) != 2 or len(a[0] ) != 2 or len(_UpperCamelCase ) != 2 or len(b[0] ) != 2:
raise Exception("Matrices are not 2x2" )
lowercase_ : Any = [
[a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
[a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
]
return new_matrix
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
return [
[matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(_UpperCamelCase ) )
]
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
return [
[matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(_UpperCamelCase ) )
]
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
if len(_UpperCamelCase ) % 2 != 0 or len(a[0] ) % 2 != 0:
raise Exception("Odd matrices are not supported!" )
lowercase_ : List[Any] = len(_UpperCamelCase )
lowercase_ : Dict = matrix_length // 2
lowercase_ : str = [[a[i][j] for j in range(_UpperCamelCase , _UpperCamelCase )] for i in range(_UpperCamelCase )]
lowercase_ : int = [
[a[i][j] for j in range(_UpperCamelCase , _UpperCamelCase )] for i in range(_UpperCamelCase , _UpperCamelCase )
]
lowercase_ : Tuple = [[a[i][j] for j in range(_UpperCamelCase )] for i in range(_UpperCamelCase )]
lowercase_ : Union[str, Any] = [[a[i][j] for j in range(_UpperCamelCase )] for i in range(_UpperCamelCase , _UpperCamelCase )]
return top_left, top_right, bot_left, bot_right
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return len(_UpperCamelCase ), len(matrix[0] )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
print("\n".join(str(_UpperCamelCase ) for line in matrix ) )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
if matrix_dimensions(_UpperCamelCase ) == (2, 2):
return default_matrix_multiplication(_UpperCamelCase , _UpperCamelCase )
lowercase_ : List[str] = split_matrix(_UpperCamelCase )
lowercase_ : Any = split_matrix(_UpperCamelCase )
lowercase_ : Optional[int] = actual_strassen(_UpperCamelCase , matrix_subtraction(_UpperCamelCase , _UpperCamelCase ) )
lowercase_ : List[Any] = actual_strassen(matrix_addition(_UpperCamelCase , _UpperCamelCase ) , _UpperCamelCase )
lowercase_ : Any = actual_strassen(matrix_addition(_UpperCamelCase , _UpperCamelCase ) , _UpperCamelCase )
lowercase_ : List[Any] = actual_strassen(_UpperCamelCase , matrix_subtraction(_UpperCamelCase , _UpperCamelCase ) )
lowercase_ : List[str] = actual_strassen(matrix_addition(_UpperCamelCase , _UpperCamelCase ) , matrix_addition(_UpperCamelCase , _UpperCamelCase ) )
lowercase_ : Optional[int] = actual_strassen(matrix_subtraction(_UpperCamelCase , _UpperCamelCase ) , matrix_addition(_UpperCamelCase , _UpperCamelCase ) )
lowercase_ : List[str] = actual_strassen(matrix_subtraction(_UpperCamelCase , _UpperCamelCase ) , matrix_addition(_UpperCamelCase , _UpperCamelCase ) )
lowercase_ : Dict = matrix_addition(matrix_subtraction(matrix_addition(_UpperCamelCase , _UpperCamelCase ) , _UpperCamelCase ) , _UpperCamelCase )
lowercase_ : List[Any] = matrix_addition(_UpperCamelCase , _UpperCamelCase )
lowercase_ : List[str] = matrix_addition(_UpperCamelCase , _UpperCamelCase )
lowercase_ : Dict = matrix_subtraction(matrix_subtraction(matrix_addition(_UpperCamelCase , _UpperCamelCase ) , _UpperCamelCase ) , _UpperCamelCase )
# construct the new matrix from our 4 quadrants
lowercase_ : Union[str, Any] = []
for i in range(len(_UpperCamelCase ) ):
new_matrix.append(top_left[i] + top_right[i] )
for i in range(len(_UpperCamelCase ) ):
new_matrix.append(bot_left[i] + bot_right[i] )
return new_matrix
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
if matrix_dimensions(_UpperCamelCase )[1] != matrix_dimensions(_UpperCamelCase )[0]:
lowercase_ : Union[str, Any] = (
"Unable to multiply these matrices, please check the dimensions.\n"
F"""Matrix A: {matrixa}\n"""
F"""Matrix B: {matrixa}"""
)
raise Exception(_UpperCamelCase )
lowercase_ : List[Any] = matrix_dimensions(_UpperCamelCase )
lowercase_ : str = matrix_dimensions(_UpperCamelCase )
if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]:
return [matrixa, matrixa]
lowercase_ : Any = max(*_UpperCamelCase , *_UpperCamelCase )
lowercase_ : str = int(math.pow(2 , math.ceil(math.loga(_UpperCamelCase ) ) ) )
lowercase_ : Union[str, Any] = matrixa
lowercase_ : List[str] = matrixa
# Adding zeros to the matrices so that both have the same dimensions and
# those dimensions are a power of 2
for i in range(0 , _UpperCamelCase ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , _UpperCamelCase ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
if i < dimensiona[0]:
for _ in range(dimensiona[1] , _UpperCamelCase ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
lowercase_ : Optional[Any] = actual_strassen(_UpperCamelCase , _UpperCamelCase )
# Removing the additional zeros
for i in range(0 , _UpperCamelCase ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , _UpperCamelCase ):
final_matrix[i].pop()
else:
final_matrix.pop()
return final_matrix
if __name__ == "__main__":
UpperCamelCase__ = [
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
UpperCamelCase__ = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
print(strassen(matrixa, matrixa))
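# A minimal, runnable sanity check of the 2x2 base case above, de-obfuscated.
# Strassen replaces 8 scalar multiplications with 7 at every recursion level,
# giving an overall cost of O(n^log2(7)) ~= O(n^2.81).
def multiply_2x2(a, b):
    return [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]

assert multiply_2x2([[1, 2], [3, 4]], [[5, 6], [7, 8]]) == [[19, 22], [43, 50]]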
| 710
|
'''simple docstring'''
from __future__ import annotations
UpperCamelCase__ = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
UpperCamelCase__ = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : str = []
lowercase_ : List[str] = len(_UpperCamelCase )
for i in range(_UpperCamelCase ):
lowercase_ : float = -1
for j in range(i + 1 , _UpperCamelCase ):
if arr[i] < arr[j]:
lowercase_ : Union[str, Any] = arr[j]
break
result.append(_UpperCamelCase )
return result
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : List[str] = []
for i, outer in enumerate(_UpperCamelCase ):
lowercase_ : float = -1
for inner in arr[i + 1 :]:
if outer < inner:
lowercase_ : Optional[Any] = inner
break
result.append(_UpperCamelCase )
return result
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : List[str] = len(_UpperCamelCase )
lowercase_ : list[float] = []
lowercase_ : list[float] = [-1] * arr_size
for index in reversed(range(_UpperCamelCase ) ):
if stack:
while stack[-1] <= arr[index]:
stack.pop()
if not stack:
break
if stack:
lowercase_ : Optional[Any] = stack[-1]
stack.append(arr[index] )
return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
UpperCamelCase__ = (
'from __main__ import arr, next_greatest_element_slow, '
'next_greatest_element_fast, next_greatest_element'
)
print(
'next_greatest_element_slow():',
timeit('next_greatest_element_slow(arr)', setup=setup),
)
print(
'next_greatest_element_fast():',
timeit('next_greatest_element_fast(arr)', setup=setup),
)
print(
' next_greatest_element():',
timeit('next_greatest_element(arr)', setup=setup),
)
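# A de-obfuscated, runnable sketch of the O(n) stack approach above: scan
# right to left, popping values that can never be the "next greater" element
# for anything further left.
def next_greater(arr):
    result = [-1.0] * len(arr)
    stack = []
    for i in reversed(range(len(arr))):
        while stack and stack[-1] <= arr[i]:
            stack.pop()
        if stack:
            result[i] = stack[-1]
        stack.append(arr[i])
    return result

assert next_greater([2, 7, 3, 5, 4]) == [7, -1.0, 5, -1.0, -1.0]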
| 640
| 0
|
'''simple docstring'''
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
UpperCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCamelCase__ = '\n Examples:\n ```py\n >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> negative_image_emb = out.negative_image_embeds\n\n >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")\n >>> pipe.to("cuda")\n\n >>> image = pipe(\n ... prompt,\n ... image_embeds=image_emb,\n ... negative_image_embeds=negative_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... ).images\n\n >>> image[0].save("cat.png")\n ```\n'
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=8 ):
"""simple docstring"""
lowercase_ : int = h // scale_factor**2
if h % scale_factor**2 != 0:
new_h += 1
lowercase_ : str = w // scale_factor**2
if w % scale_factor**2 != 0:
new_w += 1
return new_h * scale_factor, new_w * scale_factor
class _UpperCAmelCase ( snake_case ):
def __init__( self : int , a : MultilingualCLIP , a : XLMRobertaTokenizer , a : UNetaDConditionModel , a : Union[DDIMScheduler, DDPMScheduler] , a : VQModel , ):
'''simple docstring'''
super().__init__()
self.register_modules(
text_encoder=a , tokenizer=a , unet=a , scheduler=a , movq=a , )
lowercase_ : Dict = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowerCAmelCase__ ( self : List[Any] , a : Tuple , a : List[str] , a : Optional[Any] , a : str , a : Tuple , a : List[str] ):
'''simple docstring'''
if latents is None:
lowercase_ : List[str] = randn_tensor(a , generator=a , device=a , dtype=a )
else:
if latents.shape != shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
lowercase_ : Optional[int] = latents.to(a )
lowercase_ : str = latents * scheduler.init_noise_sigma
return latents
def lowerCAmelCase__ ( self : Optional[Any] , a : List[str] , a : List[Any] , a : Union[str, Any] , a : str , a : Tuple=None , ):
'''simple docstring'''
lowercase_ : Tuple = len(a ) if isinstance(a , a ) else 1
# get prompt text embeddings
lowercase_ : Any = self.tokenizer(
a , padding="max_length" , truncation=a , max_length=7_7 , return_attention_mask=a , add_special_tokens=a , return_tensors="pt" , )
lowercase_ : Union[str, Any] = text_inputs.input_ids
lowercase_ : Tuple = self.tokenizer(a , padding="longest" , return_tensors="pt" ).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(a , a ):
lowercase_ : Optional[int] = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
lowercase_ : List[str] = text_input_ids.to(a )
lowercase_ : int = text_inputs.attention_mask.to(a )
lowercase_ , lowercase_ : Optional[int] = self.text_encoder(
input_ids=a , attention_mask=a )
lowercase_ : str = prompt_embeds.repeat_interleave(a , dim=0 )
lowercase_ : int = text_encoder_hidden_states.repeat_interleave(a , dim=0 )
lowercase_ : int = text_mask.repeat_interleave(a , dim=0 )
if do_classifier_free_guidance:
lowercase_ : List[str]
if negative_prompt is None:
lowercase_ : int = [""] * batch_size
elif type(a ) is not type(a ):
raise TypeError(
f"""`negative_prompt` should be the same type to `prompt`, but got {type(a )} !="""
f""" {type(a )}.""" )
elif isinstance(a , a ):
lowercase_ : Tuple = [negative_prompt]
elif batch_size != len(a ):
raise ValueError(
f"""`negative_prompt`: {negative_prompt} has batch size {len(a )}, but `prompt`:"""
f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
" the batch size of `prompt`." )
else:
lowercase_ : Dict = negative_prompt
lowercase_ : str = self.tokenizer(
a , padding="max_length" , max_length=7_7 , truncation=a , return_attention_mask=a , add_special_tokens=a , return_tensors="pt" , )
lowercase_ : List[Any] = uncond_input.input_ids.to(a )
lowercase_ : Optional[int] = uncond_input.attention_mask.to(a )
lowercase_ , lowercase_ : int = self.text_encoder(
input_ids=a , attention_mask=a )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowercase_ : List[str] = negative_prompt_embeds.shape[1]
lowercase_ : Dict = negative_prompt_embeds.repeat(1 , a )
lowercase_ : Optional[Any] = negative_prompt_embeds.view(batch_size * num_images_per_prompt , a )
lowercase_ : Any = uncond_text_encoder_hidden_states.shape[1]
lowercase_ : List[Any] = uncond_text_encoder_hidden_states.repeat(1 , a , 1 )
lowercase_ : Tuple = uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt , a , -1 )
lowercase_ : List[Any] = uncond_text_mask.repeat_interleave(a , dim=0 )
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowercase_ : Optional[int] = torch.cat([negative_prompt_embeds, prompt_embeds] )
lowercase_ : Tuple = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
lowercase_ : Any = torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
def lowerCAmelCase__ ( self : Tuple , a : Optional[Any]=0 ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowercase_ : List[str] = torch.device(f"""cuda:{gpu_id}""" )
lowercase_ : str = [
self.unet,
self.text_encoder,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(a , a )
def lowerCAmelCase__ ( self : Union[str, Any] , a : List[str]=0 ):
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
lowercase_ : List[str] = torch.device(f"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=a )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowercase_ : List[str] = None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
lowercase_ , lowercase_ : Optional[int] = cpu_offload_with_hook(a , a , prev_module_hook=a )
if self.safety_checker is not None:
lowercase_ , lowercase_ : Optional[int] = cpu_offload_with_hook(self.safety_checker , a , prev_module_hook=a )
# We'll offload the last model manually.
lowercase_ : Dict = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(a , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(a )
def __call__( self : Tuple , a : Union[str, List[str]] , a : Union[torch.FloatTensor, List[torch.FloatTensor]] , a : Union[torch.FloatTensor, List[torch.FloatTensor]] , a : Optional[Union[str, List[str]]] = None , a : int = 5_1_2 , a : int = 5_1_2 , a : int = 1_0_0 , a : float = 4.0 , a : int = 1 , a : Optional[Union[torch.Generator, List[torch.Generator]]] = None , a : Optional[torch.FloatTensor] = None , a : Optional[str] = "pil" , a : bool = True , ):
'''simple docstring'''
if isinstance(a , a ):
lowercase_ : List[str] = 1
elif isinstance(a , a ):
lowercase_ : int = len(a )
else:
raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(a )}""" )
lowercase_ : Tuple = self._execution_device
lowercase_ : Dict = batch_size * num_images_per_prompt
lowercase_ : Dict = guidance_scale > 1.0
lowercase_ , lowercase_ , lowercase_ : List[str] = self._encode_prompt(
a , a , a , a , a )
if isinstance(a , a ):
lowercase_ : Optional[int] = torch.cat(a , dim=0 )
if isinstance(a , a ):
lowercase_ : int = torch.cat(a , dim=0 )
if do_classifier_free_guidance:
lowercase_ : Optional[int] = image_embeds.repeat_interleave(a , dim=0 )
lowercase_ : int = negative_image_embeds.repeat_interleave(a , dim=0 )
lowercase_ : str = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(
dtype=prompt_embeds.dtype , device=a )
self.scheduler.set_timesteps(a , device=a )
lowercase_ : List[str] = self.scheduler.timesteps
lowercase_ : str = self.unet.config.in_channels
lowercase_ , lowercase_ : int = get_new_h_w(a , a , self.movq_scale_factor )
# create initial latent
lowercase_ : str = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , a , a , a , self.scheduler , )
for i, t in enumerate(self.progress_bar(a ) ):
# expand the latents if we are doing classifier free guidance
lowercase_ : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase_ : Optional[int] = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
lowercase_ : Optional[int] = self.unet(
sample=a , timestep=a , encoder_hidden_states=a , added_cond_kwargs=a , return_dict=a , )[0]
if do_classifier_free_guidance:
lowercase_ , lowercase_ : Union[str, Any] = noise_pred.split(latents.shape[1] , dim=1 )
lowercase_ , lowercase_ : Optional[Any] = noise_pred.chunk(2 )
lowercase_ , lowercase_ : Any = variance_pred.chunk(2 )
lowercase_ : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowercase_ : int = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowercase_ , lowercase_ : str = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowercase_ : Tuple = self.scheduler.step(
a , a , a , generator=a , ).prev_sample
# post-processing
lowercase_ : Union[str, Any] = self.movq.decode(a , force_not_quantize=a )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
lowercase_ : List[Any] = image * 0.5 + 0.5
lowercase_ : Optional[int] = image.clamp(0 , 1 )
lowercase_ : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowercase_ : List[str] = self.numpy_to_pil(a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a )
| 711
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json',
}
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: List[Any] = 'gpt_neox_japanese'
def __init__( self : List[str] , a : List[Any]=3_2_0_0_0 , a : Union[str, Any]=2_5_6_0 , a : Optional[Any]=3_2 , a : Any=3_2 , a : str=4 , a : Optional[int]="gelu" , a : Optional[Any]=1.00 , a : Dict=1_0_0_0_0 , a : List[Any]=2_0_4_8 , a : Dict=0.02 , a : int=1e-5 , a : Optional[int]=True , a : Union[str, Any]=3_1_9_9_6 , a : List[Any]=3_1_9_9_9 , a : List[str]=0.1 , a : Dict=0.0 , **a : Union[str, Any] , ):
'''simple docstring'''
super().__init__(bos_token_id=a , eos_token_id=a , **a )
lowercase_ : int = vocab_size
lowercase_ : int = max_position_embeddings
lowercase_ : List[str] = hidden_size
lowercase_ : Any = num_hidden_layers
lowercase_ : Any = num_attention_heads
lowercase_ : List[Any] = intermediate_multiple_size
lowercase_ : List[str] = hidden_act
lowercase_ : Optional[int] = rotary_pct
lowercase_ : Tuple = rotary_emb_base
lowercase_ : Optional[Any] = initializer_range
lowercase_ : List[str] = layer_norm_eps
lowercase_ : List[str] = use_cache
lowercase_ : Any = attention_dropout
lowercase_ : List[Any] = hidden_dropout
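# A hedged usage sketch (assuming the public class name in transformers is
# GPTNeoXJapaneseConfig): instantiating with no arguments reproduces the
# defaults above, e.g. hidden_size=2560 and num_hidden_layers=32.
#
# from transformers import GPTNeoXJapaneseConfig
# config = GPTNeoXJapaneseConfig()
# assert config.hidden_size == 2560 and config.num_hidden_layers == 32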
| 640
| 0
|
'''simple docstring'''
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but uses more memory.
UpperCamelCase__ = 200
# Number of elements selected in every generation of evolution. The selection
# is taken from best to worst of that generation and must be smaller than N_POPULATION.
UpperCamelCase__ = 50
# Probability that an element of a generation can mutate, changing one of its
# genes. This helps ensure that every gene can eventually appear during evolution.
UpperCamelCase__ = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Tuple = len([g for position, g in enumerate(_UpperCamelCase ) if g == main_target[position]] )
return (item, float(_UpperCamelCase ))
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Union[str, Any] = random.randint(0 , len(_UpperCamelCase ) - 1 )
lowercase_ : Any = parent_a[:random_slice] + parent_a[random_slice:]
lowercase_ : Optional[Any] = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Tuple = list(_UpperCamelCase )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
lowercase_ : Optional[Any] = random.choice(_UpperCamelCase )
return "".join(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ):
"""simple docstring"""
lowercase_ : Union[str, Any] = []
# Generate more children proportionally to the fitness score.
lowercase_ : str = int(parent_a[1] * 100 ) + 1
lowercase_ : Dict = 10 if child_n >= 10 else child_n
for _ in range(_UpperCamelCase ):
lowercase_ : int = population_score[random.randint(0 , _UpperCamelCase )][0]
lowercase_ : Dict = crossover(parent_a[0] , _UpperCamelCase )
# Append new string to the population list.
pop.append(mutate(_UpperCamelCase , _UpperCamelCase ) )
pop.append(mutate(_UpperCamelCase , _UpperCamelCase ) )
return pop
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = True ):
"""simple docstring"""
if N_POPULATION < N_SELECTED:
lowercase_ : Optional[Any] = F"""{N_POPULATION} must be bigger than {N_SELECTED}"""
raise ValueError(_UpperCamelCase )
# Verify that the target contains no genes besides the ones inside the genes variable.
lowercase_ : Union[str, Any] = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
lowercase_ : Optional[Any] = F"""{not_in_genes_list} is not in genes list, evolution cannot converge"""
raise ValueError(_UpperCamelCase )
# Generate random starting population.
lowercase_ : Dict = []
for _ in range(_UpperCamelCase ):
population.append("".join([random.choice(_UpperCamelCase ) for i in range(len(_UpperCamelCase ) )] ) )
# Just some logs to know what the algorithm is doing.
lowercase_ , lowercase_ : Optional[int] = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(_UpperCamelCase )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
lowercase_ : List[Any] = [evaluate(_UpperCamelCase , _UpperCamelCase ) for item in population]
# Check if there is a matching evolution.
lowercase_ : Union[str, Any] = sorted(_UpperCamelCase , key=lambda _UpperCamelCase : x[1] , reverse=_UpperCamelCase )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generations,
# just to show that the algorithm is working.
if debug and generation % 10 == 0:
print(
F"""\nGeneration: {generation}"""
F"""\nTotal Population:{total_population}"""
F"""\nBest score: {population_score[0][1]}"""
F"""\nBest string: {population_score[0][0]}""" )
# Flush the old population, keeping some of the best evolutions.
# Keeping them avoids regression of the evolution.
lowercase_ : Union[str, Any] = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(_UpperCamelCase )
# Normalize population score to be between 0 and 1.
lowercase_ : Union[str, Any] = [
(item, score / len(_UpperCamelCase )) for item, score in population_score
]
# This is selection
for i in range(_UpperCamelCase ):
population.extend(select(population_score[int(_UpperCamelCase )] , _UpperCamelCase , _UpperCamelCase ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also compute small strings in
# far fewer generations.
if len(_UpperCamelCase ) > N_POPULATION:
break
if __name__ == "__main__":
UpperCamelCase__ = (
'This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'
)
UpperCamelCase__ = list(
' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'
'nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'
)
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ = basic(target_str, genes_list)
print(
f"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"""
)
| 712
|
'''simple docstring'''
import heapq as hq
import math
from collections.abc import Iterator
class _UpperCAmelCase :
def __init__( self : Optional[Any] , a : Any ):
'''simple docstring'''
lowercase_ : List[Any] = str(id_ )
lowercase_ : List[str] = None
lowercase_ : Tuple = None
lowercase_ : Dict = []
lowercase_ : Union[str, Any] = {} # {vertex:distance}
def __lt__( self : Optional[Any] , a : int ):
'''simple docstring'''
return self.key < other.key
def __repr__( self : Union[str, Any] ):
'''simple docstring'''
return self.id
def lowerCAmelCase__ ( self : Union[str, Any] , a : Optional[int] ):
'''simple docstring'''
self.neighbors.append(a )
def lowerCAmelCase__ ( self : Dict , a : int , a : Optional[int] ):
'''simple docstring'''
lowercase_ : int = weight
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , _UpperCamelCase )
graph[b - 1].add_edge(graph[a - 1] , _UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Any = []
for u in graph:
lowercase_ : List[Any] = math.inf
lowercase_ : str = None
lowercase_ : Tuple = 0
lowercase_ : Tuple = graph[:]
while q:
lowercase_ : List[Any] = min(_UpperCamelCase )
q.remove(_UpperCamelCase )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
lowercase_ : Optional[int] = u
lowercase_ : Union[str, Any] = u.edges[v.id]
for i in range(1 , len(_UpperCamelCase ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
for u in graph:
lowercase_ : str = math.inf
lowercase_ : int = None
lowercase_ : List[Any] = 0
lowercase_ : str = list(_UpperCamelCase )
hq.heapify(_UpperCamelCase )
while h:
lowercase_ : List[Any] = hq.heappop(_UpperCamelCase )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
lowercase_ : str = u
lowercase_ : Optional[int] = u.edges[v.id]
hq.heapify(_UpperCamelCase )
for i in range(1 , len(_UpperCamelCase ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def __SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
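# Usage sketch for the two Prim implementations above (readable names assumed,
# since the obfuscation renames every class and function in this file):
# g = [Vertex(i) for i in range(3)]          # Vertex == the class defined above
# connect(g, 1, 2, 15); connect(g, 1, 3, 9); connect(g, 2, 3, 7)
# prim(g, g[0])             # list-based variant -> 1-based (child, parent) MST edges
# list(prim_heap(g, g[0]))  # heap-based variant yields the same edges lazily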
| 640
| 0
|
from __future__ import annotations
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
if nth_term == "":
return [""]
lowercase_ : Optional[int] = int(_UpperCamelCase )
lowercase_ : Optional[int] = int(_UpperCamelCase )
lowercase_ : list[str] = []
for temp in range(int(_UpperCamelCase ) ):
series.append(F"""1 / {pow(temp + 1 , int(_UpperCamelCase ) )}""" if series else "1" )
return series
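# Illustrative behaviour (hand-checked against the loop above; the real
# function name is obfuscated in this copy):
# p_series(3, 2)     -> ['1', '1 / 4', '1 / 9']
# p_series("", 1000) -> ['']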
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase__ = int(input('Enter the last number (nth term) of the P-Series'))
UpperCamelCase__ = int(input('Enter the power for P-Series'))
print('Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p')
print(p_series(nth_term, power))
| 713
|
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Any = False
while is_sorted is False: # Until all the indices are traversed keep looping
lowercase_ : List[str] = True
for i in range(0 , len(_UpperCamelCase ) - 1 , 2 ): # iterating over all even indices
if input_list[i] > input_list[i + 1]:
lowercase_ , lowercase_ : Union[str, Any] = input_list[i + 1], input_list[i]
# swapping if elements not in order
lowercase_ : Any = False
for i in range(1 , len(_UpperCamelCase ) - 1 , 2 ): # iterating over all odd indices
if input_list[i] > input_list[i + 1]:
lowercase_ , lowercase_ : Tuple = input_list[i + 1], input_list[i]
# swapping if elements not in order
lowercase_ : List[Any] = False
return input_list
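# Example (hand-checked; the list is sorted in place and also returned):
# odd_even_sort([5, 4, 3, 2, 1]) -> [1, 2, 3, 4, 5]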
if __name__ == "__main__":
print('Enter list to be sorted')
UpperCamelCase__ = [int(x) for x in input().split()]
# inputing elements of the list in one line
UpperCamelCase__ = odd_even_sort(input_list)
print('The sorted list is')
print(sorted_list)
| 640
| 0
|
'''simple docstring'''
import bza
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
UpperCamelCase__ = get_logger(__name__)
class _UpperCAmelCase :
def __init__( self : List[str] , a : Optional[str] = None ):
'''simple docstring'''
lowercase_ : Union[str, Any] = (
os.path.join(a , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
lowercase_ : List[str] = Extractor
def lowerCAmelCase__ ( self : str , a : str ):
'''simple docstring'''
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path
lowercase_ : Union[str, Any] = os.path.abspath(a )
return os.path.join(self.extract_dir , hash_url_to_filename(a ) )
def lowerCAmelCase__ ( self : Union[str, Any] , a : str , a : bool ):
'''simple docstring'''
return force_extract or (
not os.path.isfile(a ) and not (os.path.isdir(a ) and os.listdir(a ))
)
def lowerCAmelCase__ ( self : List[Any] , a : str , a : bool = False ):
'''simple docstring'''
lowercase_ : List[Any] = self.extractor.infer_extractor_format(a )
if not extractor_format:
return input_path
lowercase_ : str = self._get_output_path(a )
if self._do_extract(a , a ):
self.extractor.extract(a , a , a )
return output_path
class _UpperCAmelCase ( snake_case ):
@classmethod
@abstractmethod
def lowerCAmelCase__ ( cls : Tuple , a : Union[Path, str] , **a : List[Any] ):
'''simple docstring'''
...
@staticmethod
@abstractmethod
def lowerCAmelCase__ ( a : Union[Path, str] , a : Union[Path, str] ):
'''simple docstring'''
...
class _UpperCAmelCase ( snake_case , snake_case ):
__lowerCamelCase: List[bytes] = []
@staticmethod
def lowerCAmelCase__ ( a : Union[Path, str] , a : int ):
'''simple docstring'''
with open(a , "rb" ) as f:
return f.read(a )
@classmethod
def lowerCAmelCase__ ( cls : Union[str, Any] , a : Union[Path, str] , a : bytes = b"" ):
'''simple docstring'''
if not magic_number:
lowercase_ : int = max(len(a ) for cls_magic_number in cls.magic_numbers )
try:
lowercase_ : List[Any] = cls.read_magic_number(a , a )
except OSError:
return False
return any(magic_number.startswith(a ) for cls_magic_number in cls.magic_numbers )
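# Stand-alone illustration of the sniffing logic above (stdlib only; the path
# and the byte strings are examples, not taken from this file):
# with open("archive.gz", "rb") as f:
#     header = f.read(6)
# header.startswith(b"\x1f\x8b")        # gzip magic number
# header.startswith(b"\xfd7zXZ\x00")    # xz magic number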
class _UpperCAmelCase ( snake_case ):
@classmethod
def lowerCAmelCase__ ( cls : Optional[int] , a : Union[Path, str] , **a : Optional[Any] ):
'''simple docstring'''
return tarfile.is_tarfile(a )
@staticmethod
def lowerCAmelCase__ ( a : List[str] , a : int ):
'''simple docstring'''
def resolved(a : str ) -> str:
return os.path.realpath(os.path.abspath(a ) )
def badpath(a : str , a : str ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(a , a ) ).startswith(a )
def badlink(a : Any , a : str ) -> bool:
# Links are interpreted relative to the directory containing the link
lowercase_ : Optional[int] = resolved(os.path.join(a , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=a )
lowercase_ : List[str] = resolved(a )
for finfo in members:
if badpath(finfo.name , a ):
logger.error(f"""Extraction of {finfo.name} is blocked (illegal path)""" )
elif finfo.issym() and badlink(a , a ):
logger.error(f"""Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}""" )
elif finfo.islnk() and badlink(a , a ):
logger.error(f"""Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}""" )
else:
yield finfo
@staticmethod
def lowerCAmelCase__ ( a : Union[Path, str] , a : Union[Path, str] ):
'''simple docstring'''
os.makedirs(a , exist_ok=a )
lowercase_ : List[str] = tarfile.open(a )
tar_file.extractall(a , members=TarExtractor.safemembers(a , a ) )
tar_file.close()
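# Why safemembers matters: a crafted member named "../../etc/passwd" (or a
# symlink/hard link pointing outside the archive) resolves outside the
# extraction root, so badpath()/badlink() flag it and that entry is skipped
# rather than written to disk.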
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: Union[str, Any] = [b'\x1F\x8B']
@staticmethod
def lowerCAmelCase__ ( a : Union[Path, str] , a : Union[Path, str] ):
'''simple docstring'''
with gzip.open(a , "rb" ) as gzip_file:
with open(a , "wb" ) as extracted_file:
shutil.copyfileobj(a , a )
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: str = [
b'PK\x03\x04',
b'PK\x05\x06', # empty archive
b'PK\x07\x08', # spanned archive
]
@classmethod
def lowerCAmelCase__ ( cls : Union[str, Any] , a : Union[Path, str] , a : bytes = b"" ):
'''simple docstring'''
if super().is_extractable(a , magic_number=a ):
return True
try:
# Alternative version of zipfile.is_zipfile that has fewer false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(a , "rb" ) as fp:
lowercase_ : Optional[int] = _EndRecData(a )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
lowercase_ : int = fp.read(a ) # CD is where we expect it to be
if len(a ) == sizeCentralDir:
lowercase_ : Union[str, Any] = struct.unpack(a , a ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def lowerCAmelCase__ ( a : Union[Path, str] , a : Union[Path, str] ):
'''simple docstring'''
os.makedirs(a , exist_ok=a )
with zipfile.ZipFile(a , "r" ) as zip_file:
zip_file.extractall(a )
zip_file.close()
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: List[str] = [b'\xFD\x37\x7A\x58\x5A\x00']
@staticmethod
def lowerCAmelCase__ ( a : Union[Path, str] , a : Union[Path, str] ):
'''simple docstring'''
with lzma.open(a ) as compressed_file:
with open(a , "wb" ) as extracted_file:
shutil.copyfileobj(a , a )
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: Optional[int] = [b'Rar!\x1a\x07\x00', b'Rar!\x1a\x07\x01\x00'] # RAR_ID # RAR5_ID
@staticmethod
def lowerCAmelCase__ ( a : Union[Path, str] , a : Union[Path, str] ):
'''simple docstring'''
if not config.RARFILE_AVAILABLE:
raise ImportError("Please pip install rarfile" )
import rarfile
os.makedirs(a , exist_ok=a )
lowercase_ : Union[str, Any] = rarfile.RarFile(a )
rf.extractall(a )
rf.close()
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: Any = [b'\x28\xb5\x2F\xFD']
@staticmethod
def lowerCAmelCase__ ( a : Union[Path, str] , a : Union[Path, str] ):
'''simple docstring'''
if not config.ZSTANDARD_AVAILABLE:
raise ImportError("Please pip install zstandard" )
import zstandard as zstd
lowercase_ : Union[str, Any] = zstd.ZstdDecompressor()
with open(a , "rb" ) as ifh, open(a , "wb" ) as ofh:
dctx.copy_stream(a , a )
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: Dict = [b'\x42\x5A\x68']
@staticmethod
def lowerCAmelCase__ ( a : Union[Path, str] , a : Union[Path, str] ):
'''simple docstring'''
with bza.open(a , "rb" ) as compressed_file:
with open(a , "wb" ) as extracted_file:
shutil.copyfileobj(a , a )
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: Union[str, Any] = [b'\x37\x7A\xBC\xAF\x27\x1C']
@staticmethod
def lowerCAmelCase__ ( a : Union[Path, str] , a : Union[Path, str] ):
'''simple docstring'''
if not config.PY7ZR_AVAILABLE:
raise ImportError("Please pip install py7zr" )
import pyazr
os.makedirs(a , exist_ok=a )
with pyazr.SevenZipFile(a , "r" ) as archive:
archive.extractall(a )
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: List[Any] = [b'\x04\x22\x4D\x18']
@staticmethod
def lowerCAmelCase__ ( a : Union[Path, str] , a : Union[Path, str] ):
'''simple docstring'''
if not config.LZ4_AVAILABLE:
raise ImportError("Please pip install lz4" )
import lza.frame
with lza.frame.open(a , "rb" ) as compressed_file:
with open(a , "wb" ) as extracted_file:
shutil.copyfileobj(a , a )
class _UpperCAmelCase :
# Keep the zip extractor last, because an archive may be wrongly detected as zip (or possibly as tar or gzip).
__lowerCamelCase: Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def lowerCAmelCase__ ( cls : Any ):
'''simple docstring'''
return max(
len(a )
for extractor in cls.extractors.values()
if issubclass(a , a )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def lowerCAmelCase__ ( a : Union[Path, str] , a : int ):
'''simple docstring'''
try:
return MagicNumberBaseExtractor.read_magic_number(a , magic_number_length=a )
except OSError:
return b""
@classmethod
def lowerCAmelCase__ ( cls : int , a : Union[Path, str] , a : bool = False ):
'''simple docstring'''
warnings.warn(
"Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
"Use 'infer_extractor_format' instead." , category=a , )
lowercase_ : str = cls.infer_extractor_format(a )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def lowerCAmelCase__ ( cls : Tuple , a : Union[Path, str] ): # <Added version="2.4.0"/>
'''simple docstring'''
lowercase_ : Any = cls._get_magic_number_max_length()
lowercase_ : Any = cls._read_magic_number(a , a )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(a , magic_number=a ):
return extractor_format
@classmethod
def lowerCAmelCase__ ( cls : List[str] , a : Union[Path, str] , a : Union[Path, str] , a : Optional[str] = None , a : Optional[BaseExtractor] = "deprecated" , ):
'''simple docstring'''
os.makedirs(os.path.dirname(a ) , exist_ok=a )
# Prevent parallel extractions
lowercase_ : int = str(Path(a ).with_suffix(".lock" ) )
with FileLock(a ):
shutil.rmtree(a , ignore_errors=a )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(a , a ): # passed as positional arg
warnings.warn(
"Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
"Use 'extractor_format' instead." , category=a , )
lowercase_ : Union[str, Any] = extractor if extractor != "deprecated" else extractor_format
else:
lowercase_ : Any = cls.extractors[extractor_format]
return extractor.extract(a , a )
else:
warnings.warn(
"Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
"exception in 3.0.0." , category=a , )
for extractor in cls.extractors.values():
if extractor.is_extractable(a ):
return extractor.extract(a , a )
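# End-to-end sketch (paths assumed; upstream class names used for readability,
# since every class in this copy is renamed):
# fmt = Extractor.infer_extractor_format("downloads/archive.tar")
# Extractor.extract("downloads/archive.tar", "extracted/archive", extractor_format=fmt)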
| 714
|
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Dict = 0
lowercase_ : Optional[Any] = len(_UpperCamelCase ) # No of vertices in graph
lowercase_ : Union[str, Any] = [0] * n
lowercase_ : Optional[int] = [False] * n
def dfs(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
lowercase_ : Union[str, Any] = True
lowercase_ : Dict = id_
id_ += 1
for to in graph[at]:
if to == parent:
pass
elif not visited[to]:
dfs(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , id_ )
lowercase_ : str = min(low[at] , low[to] )
if id_ <= low[to]:
bridges.append((at, to) if at < to else (to, at) )
else:
# This edge is a back edge and cannot be a bridge
lowercase_ : Optional[int] = min(low[at] , low[to] )
lowercase_ : list[tuple[int, int]] = []
for i in range(_UpperCamelCase ):
if not visited[i]:
dfs(_UpperCamelCase , -1 , _UpperCamelCase , id_ )
return bridges
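# Worked example, hand-computed for graph index 0 of the helper above: the
# triangle 0-1-2 and the cycle 5-6-7-8 contain no bridges, so the DFS returns
# the three connecting edges [(3, 4), (2, 3), (2, 5)].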
if __name__ == "__main__":
import doctest
doctest.testmod()
| 640
| 0
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class _UpperCAmelCase ( snake_case ):
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ : Union[str, Any] = tempfile.mkdtemp()
lowercase_ : int = 8
# DPR tok
lowercase_ : Optional[int] = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
lowercase_ : Optional[Any] = os.path.join(self.tmpdirname , "dpr_tokenizer" )
os.makedirs(a , exist_ok=a )
lowercase_ : Any = os.path.join(a , DPR_VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
# BART tok
lowercase_ : Optional[int] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
lowercase_ : int = dict(zip(a , range(len(a ) ) ) )
lowercase_ : str = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
lowercase_ : List[str] = {"unk_token": "<unk>"}
lowercase_ : List[str] = os.path.join(self.tmpdirname , "bart_tokenizer" )
os.makedirs(a , exist_ok=a )
lowercase_ : Optional[Any] = os.path.join(a , BART_VOCAB_FILES_NAMES["vocab_file"] )
lowercase_ : str = os.path.join(a , BART_VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(a ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(a ) )
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , "bart_tokenizer" ) )
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
@require_tokenizers
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
lowercase_ : Optional[Any] = os.path.join(self.tmpdirname , "rag_tokenizer" )
lowercase_ : Optional[int] = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() )
lowercase_ : Optional[int] = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() )
rag_config.save_pretrained(a )
rag_tokenizer.save_pretrained(a )
lowercase_ : Dict = RagTokenizer.from_pretrained(a , config=a )
self.assertIsInstance(new_rag_tokenizer.question_encoder , a )
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() )
self.assertIsInstance(new_rag_tokenizer.generator , a )
self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() )
@slow
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
lowercase_ : Any = RagTokenizer.from_pretrained("facebook/rag-token-nq" )
lowercase_ : Union[str, Any] = [
"who got the first nobel prize in physics",
"when is the next deadpool movie being released",
"which mode is used for short wave broadcast service",
"who is the owner of reading football club",
"when is the next scandal episode coming out",
"when is the last time the philadelphia won the superbowl",
"what is the most current adobe flash player version",
"how many episodes are there in dragon ball z",
"what is the first step in the evolution of the eye",
"where is gall bladder situated in human body",
"what is the main mineral in lithium batteries",
"who is the president of usa right now",
"where do the greasers live in the outsiders",
"panda is a national animal of which country",
"what is the name of manchester united stadium",
]
lowercase_ : List[Any] = tokenizer(a )
self.assertIsNotNone(a )
@slow
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
lowercase_ : Dict = RagTokenizer.from_pretrained("facebook/rag-sequence-nq" )
lowercase_ : Tuple = [
"who got the first nobel prize in physics",
"when is the next deadpool movie being released",
"which mode is used for short wave broadcast service",
"who is the owner of reading football club",
"when is the next scandal episode coming out",
"when is the last time the philadelphia won the superbowl",
"what is the most current adobe flash player version",
"how many episodes are there in dragon ball z",
"what is the first step in the evolution of the eye",
"where is gall bladder situated in human body",
"what is the main mineral in lithium batteries",
"who is the president of usa right now",
"where do the greasers live in the outsiders",
"panda is a national animal of which country",
"what is the name of manchester united stadium",
]
lowercase_ : int = tokenizer(a )
self.assertIsNotNone(a )
| 715
|
'''simple docstring'''
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
UpperCamelCase__ = 'scheduler_config.json'
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: int = 1
__lowerCamelCase: List[Any] = 2
__lowerCamelCase: Optional[Any] = 3
__lowerCamelCase: int = 4
__lowerCamelCase: Optional[int] = 5
@dataclass
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: jnp.ndarray
class _UpperCAmelCase :
__lowerCamelCase: List[str] = SCHEDULER_CONFIG_NAME
__lowerCamelCase: Optional[int] = ['dtype']
__lowerCamelCase: int = []
__lowerCamelCase: Dict = True
@classmethod
def lowerCAmelCase__ ( cls : Tuple , a : Dict[str, Any] = None , a : Optional[str] = None , a : Union[str, Any]=False , **a : Union[str, Any] , ):
'''simple docstring'''
lowercase_ , lowercase_ : Any = cls.load_config(
pretrained_model_name_or_path=a , subfolder=a , return_unused_kwargs=a , **a , )
lowercase_ , lowercase_ : Union[str, Any] = cls.from_config(a , return_unused_kwargs=a , **a )
if hasattr(a , "create_state" ) and getattr(a , "has_state" , a ):
lowercase_ : Tuple = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def lowerCAmelCase__ ( self : int , a : Union[str, os.PathLike] , a : bool = False , **a : int ):
'''simple docstring'''
self.save_config(save_directory=a , push_to_hub=a , **a )
@property
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
return self._get_compatibles()
@classmethod
def lowerCAmelCase__ ( cls : Dict ):
'''simple docstring'''
lowercase_ : str = list(set([cls.__name__] + cls._compatibles ) )
lowercase_ : str = importlib.import_module(__name__.split("." )[0] )
lowercase_ : Optional[Any] = [
getattr(a , a ) for c in compatible_classes_str if hasattr(a , a )
]
return compatible_classes
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
assert len(_UpperCamelCase ) >= x.ndim
return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(_UpperCamelCase ) - x.ndim) ) , _UpperCamelCase )
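# Shape example for the helper above: x.shape == (4,) with a target shape of
# (4, 64, 64) reshapes x to (4, 1, 1) and broadcasts it to (4, 64, 64),
# i.e. one scalar coefficient per batch element.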
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase=0.999 , _UpperCamelCase=jnp.floataa ):
"""simple docstring"""
def alpha_bar(_UpperCamelCase ):
return math.cos((time_step + 0.008) / 1.008 * math.pi / 2 ) ** 2
lowercase_ : int = []
for i in range(_UpperCamelCase ):
lowercase_ : Union[str, Any] = i / num_diffusion_timesteps
lowercase_ : Dict = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(_UpperCamelCase ) / alpha_bar(_UpperCamelCase ) , _UpperCamelCase ) )
return jnp.array(_UpperCamelCase , dtype=_UpperCamelCase )
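# The function above implements the Glide ("squaredcos_cap_v2") schedule:
#   alpha_bar(t) = cos((t + 0.008) / 1.008 * pi / 2) ** 2
#   beta_i = min(1 - alpha_bar((i + 1) / T) / alpha_bar(i / T), max_beta)
# so the betas start near zero and are capped at max_beta (0.999 by default).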
@flax.struct.dataclass
class _UpperCAmelCase :
__lowerCamelCase: jnp.ndarray
__lowerCamelCase: jnp.ndarray
__lowerCamelCase: jnp.ndarray
@classmethod
def lowerCAmelCase__ ( cls : Tuple , a : Optional[int] ):
'''simple docstring'''
lowercase_ : Any = scheduler.config
if config.trained_betas is not None:
lowercase_ : Union[str, Any] = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
elif config.beta_schedule == "linear":
lowercase_ : List[Any] = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
lowercase_ : Tuple = (
jnp.linspace(
config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
lowercase_ : Union[str, Any] = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
else:
raise NotImplementedError(
f"""beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}""" )
lowercase_ : str = 1.0 - betas
lowercase_ : Dict = jnp.cumprod(a , axis=0 )
return cls(
alphas=a , betas=a , alphas_cumprod=a , )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : List[str] = state.alphas_cumprod
lowercase_ : Optional[Any] = alphas_cumprod[timesteps] ** 0.5
lowercase_ : int = sqrt_alpha_prod.flatten()
lowercase_ : Union[str, Any] = broadcast_to_shape_from_left(_UpperCamelCase , original_samples.shape )
lowercase_ : Optional[int] = (1 - alphas_cumprod[timesteps]) ** 0.5
lowercase_ : Union[str, Any] = sqrt_one_minus_alpha_prod.flatten()
lowercase_ : Any = broadcast_to_shape_from_left(_UpperCamelCase , original_samples.shape )
return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ , lowercase_ : int = get_sqrt_alpha_prod(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
lowercase_ : str = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
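# add_noise above is the closed-form forward-diffusion step
#   x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise
# and get_velocity below computes the v-prediction target
#   v_t = sqrt(alpha_bar_t) * noise - sqrt(1 - alpha_bar_t) * x_0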
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ , lowercase_ : int = get_sqrt_alpha_prod(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
lowercase_ : Any = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
return velocity
| 640
| 0
|
'''simple docstring'''
from __future__ import annotations
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None ):
"""simple docstring"""
if start is None:
lowercase_ : List[Any] = 0
if end is None:
lowercase_ : Dict = len(_UpperCamelCase ) - 1
if start >= end:
return
lowercase_ : str = (start + end) // 2
slowsort(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
slowsort(_UpperCamelCase , mid + 1 , _UpperCamelCase )
if sequence[end] < sequence[mid]:
lowercase_ , lowercase_ : List[str] = sequence[mid], sequence[end]
slowsort(_UpperCamelCase , _UpperCamelCase , end - 1 )
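# In-place usage sketch (the function name is obfuscated above):
# seq = [4, 1, 3, 2]; slowsort(seq)  ->  seq == [1, 2, 3, 4]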
if __name__ == "__main__":
from doctest import testmod
testmod()
| 716
|
'''simple docstring'''
import heapq
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : list[list] = []
# for each node and its adjacency list, add them and the rank of the node to the queue
# using the heapq module, the queue will be filled like a priority queue
# heapq implements a min-priority queue, so -1 * len(v) is used to get max-rank-first ordering
for key, value in graph.items():
# O(log(n))
heapq.heappush(_UpperCamelCase , [-1 * len(_UpperCamelCase ), (key, value)] )
# chosen_vertices = set of chosen vertices
lowercase_ : Optional[Any] = set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
lowercase_ : Any = heapq.heappop(_UpperCamelCase )[1][0]
chosen_vertices.add(_UpperCamelCase )
# Remove all arcs adjacent to argmax
for elem in queue:
# if v has no adjacent nodes, skip
if elem[0] == 0:
continue
# if argmax is reachable from elem
# remove argmax from elem's adjacency list and update its rank
if argmax in elem[1][1]:
lowercase_ : str = elem[1][1].index(_UpperCamelCase )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(_UpperCamelCase )
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase__ = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 640
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
UpperCamelCase__ = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 717
|
'''simple docstring'''
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
'pipelines_utils',
'0.22.0',
'Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.',
standard_warn=False,
stacklevel=3,
)
| 640
| 0
|
'''simple docstring'''
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class _UpperCAmelCase ( TensorFormatter[Mapping, 'torch.Tensor', Mapping] ):
def __init__( self : Union[str, Any] , a : int=None , **a : str ):
'''simple docstring'''
super().__init__(features=a )
lowercase_ : List[str] = torch_tensor_kwargs
import torch # noqa import torch at initialization
def lowerCAmelCase__ ( self : str , a : Dict ):
'''simple docstring'''
import torch
if isinstance(a , a ) and column:
if all(
isinstance(a , torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
for x in column ):
return torch.stack(a )
return column
def lowerCAmelCase__ ( self : Optional[Any] , a : Union[str, Any] ):
'''simple docstring'''
import torch
if isinstance(a , (str, bytes, type(a )) ):
return value
elif isinstance(a , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
lowercase_ : Optional[int] = {}
if isinstance(a , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
lowercase_ : List[Any] = {"dtype": torch.intaa}
elif isinstance(a , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
lowercase_ : List[Any] = {"dtype": torch.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(a , PIL.Image.Image ):
lowercase_ : Dict = np.asarray(a )
return torch.tensor(a , **{**default_dtype, **self.torch_tensor_kwargs} )
def lowerCAmelCase__ ( self : Optional[int] , a : Dict ):
'''simple docstring'''
import torch
# support for torch, tf, jax etc.
if hasattr(a , "__array__" ) and not isinstance(a , torch.Tensor ):
lowercase_ : Tuple = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(a , np.ndarray ):
if data_struct.dtype == object: # torch tensors cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(a ) for substruct in data_struct] )
elif isinstance(a , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(a ) for substruct in data_struct] )
return self._tensorize(a )
def lowerCAmelCase__ ( self : Optional[int] , a : dict ):
'''simple docstring'''
return map_nested(self._recursive_tensorize , a , map_list=a )
def lowerCAmelCase__ ( self : Tuple , a : pa.Table ):
'''simple docstring'''
lowercase_ : Optional[Any] = self.numpy_arrow_extractor().extract_row(a )
lowercase_ : Union[str, Any] = self.python_features_decoder.decode_row(a )
return self.recursive_tensorize(a )
def lowerCAmelCase__ ( self : str , a : pa.Table ):
'''simple docstring'''
lowercase_ : List[str] = self.numpy_arrow_extractor().extract_column(a )
lowercase_ : Any = self.python_features_decoder.decode_column(a , pa_table.column_names[0] )
lowercase_ : Tuple = self.recursive_tensorize(a )
lowercase_ : List[Any] = self._consolidate(a )
return column
def lowerCAmelCase__ ( self : int , a : pa.Table ):
'''simple docstring'''
lowercase_ : Any = self.numpy_arrow_extractor().extract_batch(a )
lowercase_ : List[Any] = self.python_features_decoder.decode_batch(a )
lowercase_ : str = self.recursive_tensorize(a )
for column_name in batch:
lowercase_ : str = self._consolidate(batch[column_name] )
return batch
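# Typical entry point that exercises this formatter (dataset name assumed):
# ds = load_dataset("some/dataset", split="train").with_format("torch")
# ds[0]  # rows now come back as torch.Tensors instead of Python lists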
| 718
|
'''simple docstring'''
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
UpperCamelCase__ = {
'cola': 2,
'mnli': 3,
'mrpc': 2,
'sst-2': 2,
'sts-b': 1,
'qqp': 2,
'qnli': 2,
'rte': 2,
'wnli': 2,
}
logging.set_verbosity_info()
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None ):
"""simple docstring"""
lowercase_ : Tuple = XLNetConfig.from_json_file(_UpperCamelCase )
lowercase_ : Union[str, Any] = finetuning_task.lower() if finetuning_task is not None else ""
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(F"""Building PyTorch XLNetForSequenceClassification model from configuration: {config}""" )
lowercase_ : Dict = finetuning_task
lowercase_ : Any = GLUE_TASKS_NUM_LABELS[finetuning_task]
lowercase_ : Any = XLNetForSequenceClassification(_UpperCamelCase )
elif "squad" in finetuning_task:
lowercase_ : Optional[int] = finetuning_task
lowercase_ : Optional[int] = XLNetForQuestionAnswering(_UpperCamelCase )
else:
lowercase_ : Union[str, Any] = XLNetLMHeadModel(_UpperCamelCase )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save pytorch-model
lowercase_ : Optional[Any] = os.path.join(_UpperCamelCase , _UpperCamelCase )
lowercase_ : Dict = os.path.join(_UpperCamelCase , _UpperCamelCase )
print(F"""Save PyTorch model to {os.path.abspath(_UpperCamelCase )}""" )
torch.save(model.state_dict() , _UpperCamelCase )
print(F"""Save configuration file to {os.path.abspath(_UpperCamelCase )}""" )
with open(_UpperCamelCase , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--xlnet_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained XLNet model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--finetuning_task',
default=None,
type=str,
help='Name of a task on which the XLNet TensorFlow model was fine-tuned',
)
UpperCamelCase__ = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| 640
| 0
|
'''simple docstring'''
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase=None ):
"""simple docstring"""
lowercase_ : Dict = argparse.ArgumentParser(add_help=_UpperCamelCase , allow_abbrev=_UpperCamelCase )
# The main config parser
lowercase_ : Union[str, Any] = config_command_parser(_UpperCamelCase )
# The subparser to add commands to
lowercase_ : List[str] = config_parser.add_subparsers(title="subcommands" , dest="subcommand" )
# Then add other parsers with the parent parser
default_command_parser(_UpperCamelCase , parents=[parent_parser] )
update_command_parser(_UpperCamelCase , parents=[parent_parser] )
return config_parser
def __SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
lowercase_ : Optional[int] = get_config_parser()
lowercase_ : int = config_parser.parse_args()
if not hasattr(_UpperCamelCase , "func" ):
config_parser.print_help()
exit(1 )
# Run
args.func(_UpperCamelCase )
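# Invocation sketch (assuming the usual `accelerate` console entry point):
#   accelerate config            -> interactive questionnaire (config_command_parser)
#   accelerate config default    -> write a default config file
#   accelerate config update     -> update an existing config file in place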
if __name__ == "__main__":
main()
| 719
|
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
if number < 0:
raise ValueError("number must not be negative" )
return number & (number - 1) == 0
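# Bit trick: a positive power of two has exactly one set bit, so n & (n - 1)
# clears that bit and yields 0 (e.g. 8 & 7 == 0b1000 & 0b0111 == 0).
# Caveat: 0 & -1 == 0 as well, so this implementation also returns True for 0.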
if __name__ == "__main__":
import doctest
doctest.testmod()
| 640
| 0
|
'''simple docstring'''
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: str = ['image_processor', 'tokenizer']
__lowerCamelCase: Dict = 'Pix2StructImageProcessor'
__lowerCamelCase: Union[str, Any] = ('T5Tokenizer', 'T5TokenizerFast')
def __init__( self : str , a : Dict , a : List[str] ):
'''simple docstring'''
lowercase_ : Optional[Any] = False
super().__init__(a , a )
def __call__( self : Tuple , a : int=None , a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , a : bool = True , a : Union[bool, str, PaddingStrategy] = False , a : Union[bool, str, TruncationStrategy] = None , a : Optional[int] = None , a : Optional[int] = 2_0_4_8 , a : int = 0 , a : Optional[int] = None , a : Optional[bool] = None , a : bool = False , a : bool = False , a : bool = False , a : bool = False , a : bool = False , a : bool = True , a : Optional[Union[str, TensorType]] = None , **a : List[str] , ):
'''simple docstring'''
if images is None and text is None:
raise ValueError("You have to specify either images or text." )
# Get only text
if images is None and not self.image_processor.is_vqa:
lowercase_ : Dict = self.tokenizer
lowercase_ : Tuple = self.tokenizer(
text=a , add_special_tokens=a , padding=a , truncation=a , max_length=a , stride=a , pad_to_multiple_of=a , return_attention_mask=a , return_overflowing_tokens=a , return_special_tokens_mask=a , return_offsets_mapping=a , return_token_type_ids=a , return_length=a , verbose=a , return_tensors=a , **a , )
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
lowercase_ : Optional[int] = self.image_processor(
a , return_tensors=a , max_patches=a , **a )
else:
# add pixel_values and bbox
lowercase_ : Any = self.image_processor(
a , return_tensors=a , max_patches=a , header_text=a , **a )
if text is not None and not self.image_processor.is_vqa:
lowercase_ : int = self.tokenizer(
text=a , add_special_tokens=a , padding=a , truncation=a , max_length=a , stride=a , pad_to_multiple_of=a , return_attention_mask=a , return_overflowing_tokens=a , return_special_tokens_mask=a , return_offsets_mapping=a , return_token_type_ids=a , return_length=a , verbose=a , return_tensors=a , **a , )
if "attention_mask" in text_encoding:
lowercase_ : str = text_encoding.pop("attention_mask" )
if "input_ids" in text_encoding:
lowercase_ : Dict = text_encoding.pop("input_ids" )
else:
lowercase_ : str = None
if text_encoding is not None:
encoding_image_processor.update(a )
return encoding_image_processor
def lowerCAmelCase__ ( self : Any , *a : str , **a : Tuple ):
'''simple docstring'''
return self.tokenizer.batch_decode(*a , **a )
def lowerCAmelCase__ ( self : str , *a : Optional[int] , **a : Any ):
'''simple docstring'''
return self.tokenizer.decode(*a , **a )
@property
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
lowercase_ : Tuple = self.tokenizer.model_input_names
lowercase_ : Dict = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
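# Usage sketch (checkpoint name is an assumption, not taken from this file):
# processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base")
# inputs = processor(images=image, text="A short prompt", return_tensors="pt")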
| 720
|
'''simple docstring'''
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase__ = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
UpperCamelCase__ = 50003
UpperCamelCase__ = 50002
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( snake_case , unittest.TestCase ):
__lowerCamelCase: Optional[int] = PLBartTokenizer
__lowerCamelCase: Any = None
__lowerCamelCase: Dict = False
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
lowercase_ : Any = PLBartTokenizer(a , language_codes="base" , keep_accents=a )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ : List[str] = PLBartTokenizer(a , language_codes="base" , keep_accents=a )
lowercase_ : List[str] = tokenizer.tokenize("This is a test" )
self.assertListEqual(a , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
lowercase_ : List[str] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
lowercase_ : Union[str, Any] = tokenizer.convert_tokens_to_ids(a )
self.assertListEqual(
a , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
lowercase_ : Optional[int] = tokenizer.convert_ids_to_tokens(a )
self.assertListEqual(
a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
lowercase_ : Dict = tokenizer.vocab_size
lowercase_ : str = [tokenizer.convert_ids_to_tokens(a ) for x in range(end - 4 , a )]
self.assertListEqual(a , ["__java__", "__python__", "__en_XX__", "<mask>"] )
lowercase_ : int = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
lowercase_ : str = tokenizer(a ).input_ids
self.assertEqual(
tokenizer.decode(a , skip_special_tokens=a , clean_up_tokenization_spaces=a ) , a , )
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
lowercase_ : str = PLBartTokenizer(a , language_codes="multi" , keep_accents=a )
lowercase_ : List[str] = tokenizer.tokenize("This is a test" )
self.assertListEqual(a , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
lowercase_ : Optional[Any] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
lowercase_ : Optional[Any] = tokenizer.convert_tokens_to_ids(a )
self.assertListEqual(
a , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
lowercase_ : List[str] = tokenizer.convert_ids_to_tokens(a )
self.assertListEqual(
a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
lowercase_ : Dict = tokenizer.vocab_size
lowercase_ : Union[str, Any] = [tokenizer.convert_ids_to_tokens(a ) for x in range(end - 7 , a )]
self.assertListEqual(
a , ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"] )
lowercase_ : Any = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
lowercase_ : List[Any] = tokenizer(a ).input_ids
self.assertEqual(
tokenizer.decode(a , skip_special_tokens=a , clean_up_tokenization_spaces=a ) , a , )
@require_torch
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( unittest.TestCase ):
__lowerCamelCase: int = 'uclanlp/plbart-python-en_XX'
__lowerCamelCase: Tuple = [
'def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])',
'def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])',
]
__lowerCamelCase: List[str] = [
'Returns the maximum value of a b c.',
'Sums the values of a b c.',
]
__lowerCamelCase: List[str] = [
134,
5452,
3_3460,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
988,
20,
3_3456,
19,
3_3456,
771,
39,
4258,
889,
3318,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
2471,
2,
PYTHON_CODE,
]
@classmethod
def lowerCAmelCase__ ( cls : str ):
'''simple docstring'''
lowercase_ : PLBartTokenizer = PLBartTokenizer.from_pretrained(
cls.checkpoint_name , language_codes="base" , src_lang="python" , tgt_lang="en_XX" )
lowercase_ : List[str] = 1
return cls
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"] , 5_0_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"] , 5_0_0_0_2 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"] , 5_0_0_0_3 )
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
lowercase_ : int = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , a )
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
self.assertIn(a , self.tokenizer.all_special_ids )
lowercase_ : List[Any] = [EN_CODE, 9_0_3_7, 3_3_4_4_2, 5_7, 7_5_2, 1_5_3, 1_4, 5_6, 1_8, 9, 2]
lowercase_ : Optional[int] = self.tokenizer.decode(a , skip_special_tokens=a )
lowercase_ : Any = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=a )
self.assertEqual(a , a )
self.assertNotIn(self.tokenizer.eos_token , a )
def lowerCAmelCase__ ( self : Dict ):
'''simple docstring'''
lowercase_ : Optional[int] = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 2_0]
self.assertIsInstance(src_text[0] , a )
lowercase_ : Tuple = 1_0
lowercase_ : int = self.tokenizer(a , max_length=a , truncation=a ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , a )
self.assertEqual(len(a ) , a )
def lowerCAmelCase__ ( self : Dict ):
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"] ) , [5_0_0_0_4, 5_0_0_0_1] )
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
lowercase_ : Optional[int] = tempfile.mkdtemp()
lowercase_ : List[str] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(a )
lowercase_ : Tuple = PLBartTokenizer.from_pretrained(a )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , a )
@require_torch
def lowerCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ : int = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=a , return_tensors="pt" )
lowercase_ : List[str] = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , a )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
lowercase_ : Union[str, Any] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=a , truncation=a , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
lowercase_ : Dict = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
self.assertIsInstance(a , a )
self.assertEqual((2, 2_6) , batch.input_ids.shape )
self.assertEqual((2, 2_6) , batch.attention_mask.shape )
lowercase_ : Dict = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , a )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
lowercase_ : List[str] = self.tokenizer(self.src_text , padding=a , truncation=a , max_length=3 , return_tensors="pt" )
lowercase_ : List[str] = self.tokenizer(
text_target=self.tgt_text , padding=a , truncation=a , max_length=1_0 , return_tensors="pt" )
lowercase_ : Dict = targets["input_ids"]
lowercase_ : str = shift_tokens_right(a , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )
@require_torch
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
lowercase_ : List[str] = self.tokenizer._build_translation_inputs(
"A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="java" )
self.assertEqual(
nested_simplify(a ) , {
# A, test, EOS, en_XX
"input_ids": [[1_5_0, 2_4_2, 2, 5_0_0_0_3]],
"attention_mask": [[1, 1, 1, 1]],
# java
"forced_bos_token_id": 5_0_0_0_1,
} , )
| 640
| 0
|
'''simple docstring'''
from ....utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
class _UpperCAmelCase ( snake_case ):
def __init__( self : Optional[Any] , a : Optional[int] , a : Any=None , a : Any=2_0_4_8 ):
'''simple docstring'''
lowercase_ : int = config.__dict__
lowercase_ : int = modal_hidden_size
if num_labels:
lowercase_ : List[Any] = num_labels
| 721
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
UpperCamelCase__ = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 640
| 0
|
'''simple docstring'''
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class _UpperCAmelCase :
def lowerCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ : Union[str, Any] = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
lowercase_ : Union[str, Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
lowercase_ : Union[str, Any] = UNetaDConditionModel(
sample_size=3_2 , layers_per_block=1 , block_out_channels=[3_2, 6_4] , down_block_types=[
"ResnetDownsampleBlock2D",
"SimpleCrossAttnDownBlock2D",
] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=3 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
lowercase_ : List[str] = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , thresholding=a , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , )
torch.manual_seed(0 )
lowercase_ : str = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
    def get_superresolution_dummy_components(self):
        '''simple docstring'''
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")
        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, layers_per_block=[1, 2], block_out_channels=[32, 64], down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ], mid_block_type="UNetMidBlock2DSimpleCrossAttn", up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"], in_channels=6, out_channels=6, cross_attention_dim=32, encoder_hid_dim=32, attention_head_dim=8, addition_embed_type="text", addition_embed_type_num_heads=2, cross_attention_norm="group_norm", resnet_time_scale_shift="scale_shift", act_fn="gelu", class_embed_type="timestep", mid_block_scale_factor=1.414, time_embedding_act_fn="gelu", time_embedding_dim=32)
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests
        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02, thresholding=True, dynamic_thresholding_ratio=0.95, sample_max_value=1.0, prediction_type="epsilon", variance_type="learned_range")
        torch.manual_seed(0)
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02)
        torch.manual_seed(0)
        watermarker = IFWatermarker()
        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def _test_save_load_optional_components(self):
        '''simple docstring'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        prompt = inputs["prompt"]
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]
        if "image" in inputs:
            image = inputs["image"]
        else:
            image = None
        if "mask_image" in inputs:
            mask_image = inputs["mask_image"]
        else:
            mask_image = None
        if "original_image" in inputs:
            original_image = inputs["original_image"]
        else:
            original_image = None
        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt)
        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }
        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image
        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)
        output = pipe(**inputs)[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests
        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None, f"""`{optional_component}` did not stay set to None after loading.""")
        inputs = self.get_dummy_inputs(torch_device)
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]
        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }
        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image
        output_loaded = pipe_loaded(**inputs)[0]
        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
    def _test_save_load_local(self):
        '''simple docstring'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests
        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]
        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
| 700
|
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
class SchedulerType(Enum):
    LINEAR = 'linear'
    COSINE = 'cosine'
    COSINE_WITH_RESTARTS = 'cosine_with_restarts'
    POLYNOMIAL = 'polynomial'
    CONSTANT = 'constant'
    CONSTANT_WITH_WARMUP = 'constant_with_warmup'
    PIECEWISE_CONSTANT = 'piecewise_constant'
def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    """simple docstring"""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    """simple docstring"""
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0
    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    """simple docstring"""
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, value = rule_str.split(":")
        steps = int(value_str)
        value = float(value)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])
    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple
        return rule_func
    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """simple docstring"""
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps)))
    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1):
    """simple docstring"""
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))
    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_with_hard_restarts_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1):
    """simple docstring"""
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))
    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_polynomial_decay_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1):
    """simple docstring"""
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"""lr_end ({lr_end}) must be smaller than initial lr ({lr_init})""")
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init
    return LambdaLR(optimizer, lr_lambda, last_epoch)
TYPE_TO_SCHEDULER_FUNCTION = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(name: Union[str, SchedulerType], optimizer: Optimizer, step_rules: Optional[str] = None, num_warmup_steps: Optional[int] = None, num_training_steps: Optional[int] = None, num_cycles: int = 1, power: float = 1.0, last_epoch: int = -1):
    """simple docstring"""
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"""{name} requires `num_warmup_steps`, please provide that argument.""")
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"""{name} requires `num_training_steps`, please provide that argument.""")
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, num_cycles=num_cycles, last_epoch=last_epoch)
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, power=power, last_epoch=last_epoch)
    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch)
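# --- Usage sketch (not part of the original module; the toy model, optimizer
# and step counts below are illustrative assumptions) ---
#
#   import torch
#   model = torch.nn.Linear(4, 4)
#   optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
#   lr_scheduler = get_scheduler(
#       "cosine", optimizer=optimizer, num_warmup_steps=10, num_training_steps=100
#   )
#   for _ in range(100):
#       optimizer.step()      # update parameters first ...
#       lr_scheduler.step()   # ... then advance the LR schedule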
| 640
| 0
|
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
    AutoTokenizer,
    BlipImageProcessor,
    InstructBlipConfig,
    InstructBlipForConditionalGeneration,
    InstructBlipProcessor,
    InstructBlipQFormerConfig,
    InstructBlipVisionConfig,
    LlamaConfig,
    LlamaTokenizerFast,
    T5Config,
    T5TokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    """simple docstring"""
    url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image
def create_rename_keys(config):
    """simple docstring"""
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding") )
rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding") )
rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight") )
rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias") )
rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight") )
rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.weight""", F"""vision_model.encoder.layers.{i}.layer_norm1.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.bias""", F"""vision_model.encoder.layers.{i}.layer_norm1.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.weight""", F"""vision_model.encoder.layers.{i}.layer_norm2.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.bias""", F"""vision_model.encoder.layers.{i}.layer_norm2.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.qkv.weight""", F"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.weight""", F"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.bias""", F"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") )
# QFormer
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.embeddings.layernorm.weight") )
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.embeddings.layernorm.bias") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    """simple docstring"""
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    """simple docstring"""
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"""visual_encoder.blocks.{i}.attn.q_bias""")
        v_bias = state_dict.pop(f"""visual_encoder.blocks.{i}.attn.v_bias""")
        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"""visual_encoder.blocks.{i}.attn.qkv.bias"""] = qkv_bias
def get_blip2_config(model_name):
    """simple docstring"""
    image_size = 364 if "coco" in model_name else 224
    vision_config = InstructBlipVisionConfig(image_size=image_size).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "vicuna-7b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf", vocab_size=32001).to_dict()
    elif "vicuna-13b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf", vocab_size=32001).to_dict()
    else:
        raise ValueError("Model name not supported")
    # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
    qformer_config = InstructBlipQFormerConfig(vocab_size=30523).to_dict()
    config = InstructBlipConfig(vision_config=vision_config, text_config=text_config, qformer_config=qformer_config)
    return config, image_size
@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """simple docstring"""
    qformer_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased", truncation_side="left")
    qformer_tokenizer.add_special_tokens({"bos_token": "[DEC]"})
    if "t5" in model_name:
        tokenizer = T5TokenizerFast.from_pretrained("google/flan-t5-xl", truncation_side="left")
    elif "vicuna" in model_name:
        # the following was used in the original implementation:
        # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
        # tokenizer.add_special_tokens({"pad_token": "[PAD]"})
        # tokenizer.add_special_tokens({"bos_token": "</s>"})
        # tokenizer.add_special_tokens({"eos_token": "</s>"})
        # tokenizer.add_special_tokens({"unk_token": "</s>"})
        tokenizer = LlamaTokenizerFast.from_pretrained(
            "huggyllama/llama-7b", truncation_side="left", bos_token="</s>", unk_token="</s>")
        tokenizer.add_special_tokens({"pad_token": "[PAD]"})
    config, image_size = get_blip2_config(model_name)
    hf_model = InstructBlipForConditionalGeneration(config).eval()
    model_name_to_original = {
        "instructblip-vicuna-7b": ("blip2_vicuna_instruct", "vicuna7b"),
        "instructblip-vicuna-13b": ("blip2_vicuna_instruct", "vicuna13b"),
        "instructblip-flan-t5-xl": ("blip2_t5_instruct", "flant5xl"),
        "instructblip-flan-t5-xxl": ("blip2_t5_instruct", "flant5xxl"),
    }
    name, type = model_name_to_original[model_name]
    # load original model
    print("Loading original model...")
    lavis_device = "cuda:1" if torch.cuda.is_available() else "cpu"
    hf_model_device = "cuda:2" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=type, is_eval=True, device=lavis_device)
    original_model.eval()
    print("Done!")
    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "llm_proj" in key:
            key = key.replace("llm_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("llm_model"):
            key = key.replace("llm_model", "language_model")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val
    # read in qv biases
    read_in_q_v_bias(state_dict, config)
    # note: weights get loaded in torch.float32 by default
    hf_model.load_state_dict(state_dict, strict=True)
    image = load_demo_image()
    prompt = "What is unusual about this image?"
    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD)
    processor = InstructBlipProcessor(
        image_processor=image_processor, tokenizer=tokenizer, qformer_tokenizer=qformer_tokenizer)
    inputs = processor(images=image, text=prompt, return_tensors="pt").to(hf_model_device)
    # make sure processor creates exact same pixel values
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(lavis_device)
    pixel_values = inputs.pixel_values
    assert torch.allclose(original_pixel_values.to(pixel_values.device), pixel_values)
    original_model.to(lavis_device)
    hf_model.to(hf_model_device)
    with torch.no_grad():
        if "vicuna" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [prompt]}).logits
            logits = hf_model(**inputs).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": [prompt], "text_output": ["\n"]}).logits
            label_input_ids = tokenizer("\n", return_tensors="pt").input_ids.to(hf_model_device)
            labels = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(**inputs, labels=labels).logits
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])
    # assert values
    assert original_logits.shape == logits.shape
    atol = 1e-4 if "vicuna" in model_name else 1e-5
    assert torch.allclose(original_logits.to(logits.device), logits, atol=atol)
    print("Looks ok!")
    print("Generating with original model...")
    original_outputs = original_model.generate({"image": original_pixel_values, "prompt": prompt}, num_beams=5)
    # important: we need to cast the weights of the HF model to the appropriate type
    print("Generating with HF model...")
    outputs = hf_model.generate(
        **inputs, do_sample=False, num_beams=5, max_length=256, min_length=1, top_p=0.9, repetition_penalty=1.5, length_penalty=1.0, temperature=1)
    if "vicuna" in model_name:
        # convert output id 0 to 2 (eos_token_id)
        # TODO add this in the generate method?
        outputs[outputs == 0] = 2
    print("Original generation:", original_outputs)
    output_text = processor.batch_decode(outputs, skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)
    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        processor.push_to_hub(f"""Salesforce/{model_name}""")
        hf_model.push_to_hub(f"""Salesforce/{model_name}""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
'instructblip-vicuna-7b',
'instructblip-vicuna-13b',
'instructblip-flan-t5-xl',
'instructblip-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='instructblip-flan-t5-xl',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
    args = parser.parse_args()
    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 701
|
'''simple docstring'''
def solution(pence: int = 200) -> int:
    """simple docstring"""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 73682
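    # Worked check (illustrative): for pence=5 only the coins 1, 2 and 5 can be
    # used, giving the four decompositions 5, 2+2+1, 2+1+1+1 and 1+1+1+1+1.
    assert solution(5) == 4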
| 640
| 0
|
'''simple docstring'''
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lz4, require_zstandard
def test_mockfs(mockfs):
    """simple docstring"""
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry
def test_non_mockfs():
    """simple docstring"""
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry
def test_extract_path_from_uri():
    """simple docstring"""
    mock_bucket = "mock-s3-bucket"
    dataset_path = f"""s3://{mock_bucket}"""
    dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path.startswith("s3://") is False
    new_dataset_path = "./local/path"
    dataset_path = extract_path_from_uri(new_dataset_path)
    assert dataset_path == new_dataset_path
def test_is_remote_filesystem(mockfs):
    """simple docstring"""
    is_remote = is_remote_filesystem(mockfs)
    assert is_remote is True
    fs = fsspec.filesystem("file")
    is_remote = is_remote_filesystem(fs)
    assert is_remote is False
@pytest.mark.parametrize("compression_fs_class" , _UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : List[str] = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bza_file, "lz4": lza_file}
lowercase_ : Any = input_paths[compression_fs_class.protocol]
if input_path is None:
lowercase_ : Dict = F"""for '{compression_fs_class.protocol}' compression protocol, """
if compression_fs_class.protocol == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(_UpperCamelCase )
lowercase_ : List[str] = fsspec.filesystem(compression_fs_class.protocol , fo=_UpperCamelCase )
assert isinstance(_UpperCamelCase , _UpperCamelCase )
lowercase_ : Optional[Any] = os.path.basename(_UpperCamelCase )
lowercase_ : str = expected_filename[: expected_filename.rindex("." )]
assert fs.glob("*" ) == [expected_filename]
with fs.open(_UpperCamelCase , "r" , encoding="utf-8" ) as f, open(_UpperCamelCase , encoding="utf-8" ) as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize("protocol" , ["zip", "gzip"] )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Any = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
lowercase_ : str = compressed_file_paths[protocol]
lowercase_ : Any = "dataset.jsonl"
lowercase_ : Dict = F"""{protocol}://{member_file_path}::{compressed_file_path}"""
lowercase_ : List[Any] = fsspec.get_fs_token_paths(_UpperCamelCase )
assert fs.isfile(_UpperCamelCase )
assert not fs.isfile("non_existing_" + member_file_path )
@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
    """simple docstring"""
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
    assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
    assert hffs.isdir("data")
    assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
    with open(text_file) as f:
        assert hffs.open("data/text_data.txt", "r").read() == f.read()
def test_fs_overwrites():
    """simple docstring"""
    protocol = "bz2"
    # Import module
    import datasets.filesystems
    # Overwrite protocol and reload
    register_implementation(protocol, None, clobber=True)
    with pytest.warns(UserWarning) as warning_info:
        importlib.reload(datasets.filesystems)
    assert len(warning_info) == 1
    assert (
        str(warning_info[0].message)
        == f"""A filesystem protocol was already set for {protocol} and will be overwritten."""
    )
| 702
|
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    def __init__(self, path_or_paths: NestedDataStructureLike[PathLike], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, field: Optional[str] = None, num_proc: Optional[int] = None, **kwargs):
        '''simple docstring'''
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs)
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, field=field, **kwargs)
    def read(self):
        '''simple docstring'''
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc)
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
class JsonDatasetWriter:
    def __init__(self, dataset: Dataset, path_or_buf: Union[PathLike, BinaryIO], batch_size: Optional[int] = None, num_proc: Optional[int] = None, **to_json_kwargs):
        '''simple docstring'''
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""")
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs
    def write(self):
        '''simple docstring'''
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)
        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"""`datasets` currently does not support {compression} compression""")
        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"""The compression parameter is not supported when writing to a buffer, but compression={compression}"""
                    " was passed. Please provide a local path instead.")
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        return written
    def _batch_json(self, args):
        '''simple docstring'''
        offset, orient, lines, index, to_json_kwargs = args
        batch = query_table(
            table=self.dataset.data, key=slice(offset, offset + self.batch_size), indices=self.dataset._indices)
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs)
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)
    def _write(self, file_obj: BinaryIO, orient, lines, index, **to_json_kwargs) -> int:
        '''simple docstring'''
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size), unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating json from Arrow format"):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json, [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)]), total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size, unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating json from Arrow format"):
                    written += file_obj.write(json_str)
        return written
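# --- usage sketch (illustrative; the file names below are hypothetical, not
# from the source; these classes are normally reached via load_dataset("json", ...)
# and Dataset.to_json) ---
# ds = JsonDatasetReader("data.jsonl", split=NamedSplit("train")).read()
# JsonDatasetWriter(ds, "out.jsonl").write()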
| 640
| 0
|
'''simple docstring'''
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCamelCase__ = logging.get_logger(__name__)
class LevitImageProcessor(BaseImageProcessor):
    model_input_names = ['pixel_values']
    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN, image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD, **kwargs):
        '''simple docstring'''
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        '''simple docstring'''
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""")
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        '''simple docstring'''
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        '''simple docstring'''
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        '''simple docstring'''
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = None, do_center_crop: Optional[bool] = None, crop_size: Optional[Dict[str, int]] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, Iterable[float]]] = None, image_std: Optional[Union[float, Iterable[float]]] = None, return_tensors: Optional[TensorType] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs):
        '''simple docstring'''
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image, size, resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
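# --- usage sketch (illustrative; assumes the class name restored above) ---
# import numpy as np
# processor = LevitImageProcessor()
# batch = processor(images=np.zeros((256, 256, 3), dtype=np.uint8), return_tensors="np")
# assert batch["pixel_values"].shape == (1, 3, 224, 224)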
| 703
|
'''simple docstring'''
def largest_square_area_in_matrix_top_down(rows: int, cols: int, mat: list[list[int]]) -> int:
    """simple docstring"""
    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0
        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)
        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0
    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]
def largest_square_area_in_matrix_top_down_with_memoization(rows: int, cols: int, mat: list[list[int]]) -> int:
    """simple docstring"""
    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]
        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)
        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0
    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]
def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    """simple docstring"""
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]
            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0
    return largest_square_area
def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    """simple docstring"""
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]
            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        next_row = current_row[:]  # snapshot this row before it is overwritten
    return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
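    # Cross-check (illustrative): the four implementations above should agree;
    # the largest all-ones square in this sample matrix has side length 2.
    sample_matrix = [[1, 0, 1], [1, 1, 1], [0, 1, 1]]
    assert (
        largest_square_area_in_matrix_top_down(3, 3, sample_matrix)
        == largest_square_area_in_matrix_top_down_with_memoization(3, 3, sample_matrix)
        == largest_square_area_in_matrix_bottom_up(3, 3, sample_matrix)
        == largest_square_area_in_matrix_bottom_up_space_optimization(3, 3, sample_matrix)
        == 2
    )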
| 640
| 0
|
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
UpperCamelCase__ = TypeVar('KEY')
UpperCamelCase__ = TypeVar('VAL')
@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL
class _DeletedItem(_Item):
    def __init__(self):
        '''simple docstring'''
        super().__init__(None, None)
    def __bool__(self) -> bool:
        '''simple docstring'''
        return False
_deleted = _DeletedItem()
class HashMap(MutableMapping[KEY, VAL]):
    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        '''simple docstring'''
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0
    def _get_bucket_index(self, key: KEY) -> int:
        '''simple docstring'''
        return hash(key) % len(self._buckets)
    def _get_next_ind(self, ind: int) -> int:
        '''simple docstring'''
        return (ind + 1) % len(self._buckets)
    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        '''simple docstring'''
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False
    def _is_full(self) -> bool:
        '''simple docstring'''
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)
    def _is_sparse(self) -> bool:
        '''simple docstring'''
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit
    def _resize(self, new_size: int) -> None:
        '''simple docstring'''
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)
    def _size_up(self) -> None:
        '''simple docstring'''
        self._resize(len(self._buckets) * 2)
    def _size_down(self) -> None:
        '''simple docstring'''
        self._resize(len(self._buckets) // 2)
    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        '''simple docstring'''
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)
    def _add_item(self, key: KEY, val: VAL) -> None:
        '''simple docstring'''
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break
    def __setitem__(self, key: KEY, val: VAL) -> None:
        '''simple docstring'''
        if self._is_full():
            self._size_up()
        self._add_item(key, val)
    def __delitem__(self, key: KEY) -> None:
        '''simple docstring'''
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()
    def __getitem__(self, key: KEY) -> VAL:
        '''simple docstring'''
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)
    def __len__(self) -> int:
        '''simple docstring'''
        return self._len
    def __iter__(self) -> Iterator[KEY]:
        '''simple docstring'''
        yield from (item.key for item in self._buckets if item)
    def __repr__(self) -> str:
        '''simple docstring'''
        val_string = " ,".join(
            f"""{item.key}: {item.val}""" for item in self._buckets if item)
        return f"""HashMap({val_string})"""
| 704
|
'''simple docstring'''
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
UpperCamelCase__ = namedtuple(
'_TestCommandArgs',
[
'dataset',
'name',
'cache_dir',
'data_dir',
'all_configs',
'save_infos',
'ignore_verifications',
'force_redownload',
'clear_cache',
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_apercent_close(source, target):
    """simple docstring"""
    return (abs(source - target) / target) < 0.01
@pytest.mark.integration
def test_test_command(dataset_dir):
    """simple docstring"""
    args = _TestCommandArgs(dataset=dataset_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    dataset_readme_path = os.path.join(dataset_dir, "README.md")
    assert os.path.exists(dataset_readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_dir)
    expected_dataset_infos = DatasetInfosDict(
{
"default": DatasetInfo(
features=Features(
{
"tokens": Sequence(Value("string" ) ),
"ner_tags": Sequence(
ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] ) ),
"langs": Sequence(Value("string" ) ),
"spans": Sequence(Value("string" ) ),
} ) , splits=[
{
"name": "train",
"num_bytes": 235_1563,
"num_examples": 1_0000,
},
{
"name": "validation",
"num_bytes": 23_8418,
"num_examples": 1000,
},
] , download_size=394_0680 , dataset_size=258_9981 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
        if key == "num_bytes":
            assert is_apercent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_apercent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            assert result == expected
| 640
| 0
|
'''simple docstring'''
def bfs(graph, s, t, parent):
    """simple docstring"""
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]
def ford_fulkerson(graph, source, sink):
    """simple docstring"""
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
UpperCamelCase__ = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
UpperCamelCase__, UpperCamelCase__ = 0, 5
print(ford_fulkerson(graph, source, sink))
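# For this classic example network (the CLRS textbook instance), the printed
# maximum flow from node 0 to node 5 is 23.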
| 705
|
'''simple docstring'''
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ['text', 'image', 'audio']
def create_inputs(input_types: List[str]):
    """simple docstring"""
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512)))
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"""Invalid type requested: {input_type}""")
    return inputs
def output_types(outputs: List):
    """simple docstring"""
    output_types = []
    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"""Invalid output: {output}""")
    return output_types
@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        '''simple docstring'''
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))
        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)
        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)
    def test_call(self):
        '''simple docstring'''
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)
        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]
        self.assertListEqual(output_types(outputs), self.tool.outputs)
    def test_common_attributes(self):
        '''simple docstring'''
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))
    def test_agent_types_outputs(self):
        '''simple docstring'''
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)
        if not isinstance(outputs, list):
            outputs = [outputs]
        self.assertEqual(len(outputs), len(self.tool.outputs))
        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))
    def test_agent_type_inputs(self):
        '''simple docstring'''
        inputs = create_inputs(self.tool.inputs)
        _inputs = []
        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))
        # Should not raise an error
        outputs = self.tool(*_inputs)
        if not isinstance(outputs, list):
            outputs = [outputs]
        self.assertEqual(len(outputs), len(self.tool.outputs))
| 640
| 0
|
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        '''simple docstring'''
        raise NotImplementedError()
    @abstractmethod
    def run(self):
        '''simple docstring'''
        raise NotImplementedError()
| 706
|
'''simple docstring'''
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
UpperCamelCase__ = '\n Examples:\n ```py\n >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> negative_image_emb = out.negative_image_embeds\n\n >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")\n >>> pipe.to("cuda")\n\n >>> image = pipe(\n ... prompt,\n ... image_embeds=image_emb,\n ... negative_image_embeds=negative_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... ).images\n\n >>> image[0].save("cat.png")\n ```\n'
def get_new_h_w(h, w, scale_factor=8):
    """simple docstring"""
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
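# Worked example (illustrative): with the default scale_factor=8 the divisor is
# 8**2 = 64, so get_new_h_w(768, 768) -> (96, 96), while get_new_h_w(500, 500)
# rounds up to (64, 64).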
class KandinskyPipeline(DiffusionPipeline):
    def __init__(self, text_encoder: MultilingualCLIP, tokenizer: XLMRobertaTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, DDPMScheduler], movq: VQModel):
        '''simple docstring'''
        super().__init__()
        self.register_modules(
            text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        '''simple docstring'''
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
def lowerCAmelCase__ ( self : Optional[Any] , a : List[str] , a : List[Any] , a : Union[str, Any] , a : str , a : Tuple=None , ):
'''simple docstring'''
lowercase_ : Tuple = len(a ) if isinstance(a , a ) else 1
# get prompt text embeddings
lowercase_ : Any = self.tokenizer(
a , padding="max_length" , truncation=a , max_length=7_7 , return_attention_mask=a , add_special_tokens=a , return_tensors="pt" , )
lowercase_ : Union[str, Any] = text_inputs.input_ids
lowercase_ : Tuple = self.tokenizer(a , padding="longest" , return_tensors="pt" ).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(a , a ):
lowercase_ : Optional[int] = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
lowercase_ : List[str] = text_input_ids.to(a )
lowercase_ : int = text_inputs.attention_mask.to(a )
lowercase_ , lowercase_ : Optional[int] = self.text_encoder(
input_ids=a , attention_mask=a )
lowercase_ : str = prompt_embeds.repeat_interleave(a , dim=0 )
lowercase_ : int = text_encoder_hidden_states.repeat_interleave(a , dim=0 )
lowercase_ : int = text_mask.repeat_interleave(a , dim=0 )
if do_classifier_free_guidance:
lowercase_ : List[str]
if negative_prompt is None:
lowercase_ : int = [""] * batch_size
elif type(a ) is not type(a ):
raise TypeError(
f"""`negative_prompt` should be the same type to `prompt`, but got {type(a )} !="""
f""" {type(a )}.""" )
elif isinstance(a , a ):
lowercase_ : Tuple = [negative_prompt]
elif batch_size != len(a ):
raise ValueError(
f"""`negative_prompt`: {negative_prompt} has batch size {len(a )}, but `prompt`:"""
f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
" the batch size of `prompt`." )
else:
lowercase_ : Dict = negative_prompt
lowercase_ : str = self.tokenizer(
a , padding="max_length" , max_length=7_7 , truncation=a , return_attention_mask=a , add_special_tokens=a , return_tensors="pt" , )
lowercase_ : List[Any] = uncond_input.input_ids.to(a )
lowercase_ : Optional[int] = uncond_input.attention_mask.to(a )
lowercase_ , lowercase_ : int = self.text_encoder(
input_ids=a , attention_mask=a )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowercase_ : List[str] = negative_prompt_embeds.shape[1]
lowercase_ : Dict = negative_prompt_embeds.repeat(1 , a )
lowercase_ : Optional[Any] = negative_prompt_embeds.view(batch_size * num_images_per_prompt , a )
lowercase_ : Any = uncond_text_encoder_hidden_states.shape[1]
lowercase_ : List[Any] = uncond_text_encoder_hidden_states.repeat(1 , a , 1 )
lowercase_ : Tuple = uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt , a , -1 )
lowercase_ : List[Any] = uncond_text_mask.repeat_interleave(a , dim=0 )
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowercase_ : Optional[int] = torch.cat([negative_prompt_embeds, prompt_embeds] )
lowercase_ : Tuple = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
lowercase_ : Any = torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
def lowerCAmelCase__ ( self : Tuple , a : Optional[Any]=0 ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowercase_ : List[str] = torch.device(f"""cuda:{gpu_id}""" )
lowercase_ : str = [
self.unet,
self.text_encoder,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(a , a )
def lowerCAmelCase__ ( self : Union[str, Any] , a : List[str]=0 ):
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
lowercase_ : List[str] = torch.device(f"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=a )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowercase_ : List[str] = None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
lowercase_ , lowercase_ : Optional[int] = cpu_offload_with_hook(a , a , prev_module_hook=a )
if self.safety_checker is not None:
lowercase_ , lowercase_ : Optional[int] = cpu_offload_with_hook(self.safety_checker , a , prev_module_hook=a )
# We'll offload the last model manually.
lowercase_ : Dict = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(a , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(a )
def __call__( self : Tuple , a : Union[str, List[str]] , a : Union[torch.FloatTensor, List[torch.FloatTensor]] , a : Union[torch.FloatTensor, List[torch.FloatTensor]] , a : Optional[Union[str, List[str]]] = None , a : int = 5_1_2 , a : int = 5_1_2 , a : int = 1_0_0 , a : float = 4.0 , a : int = 1 , a : Optional[Union[torch.Generator, List[torch.Generator]]] = None , a : Optional[torch.FloatTensor] = None , a : Optional[str] = "pil" , a : bool = True , ):
'''simple docstring'''
if isinstance(a , a ):
lowercase_ : List[str] = 1
elif isinstance(a , a ):
lowercase_ : int = len(a )
else:
raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(a )}""" )
lowercase_ : Tuple = self._execution_device
lowercase_ : Dict = batch_size * num_images_per_prompt
lowercase_ : Dict = guidance_scale > 1.0
lowercase_ , lowercase_ , lowercase_ : List[str] = self._encode_prompt(
a , a , a , a , a )
if isinstance(a , a ):
lowercase_ : Optional[int] = torch.cat(a , dim=0 )
if isinstance(a , a ):
lowercase_ : int = torch.cat(a , dim=0 )
if do_classifier_free_guidance:
lowercase_ : Optional[int] = image_embeds.repeat_interleave(a , dim=0 )
lowercase_ : int = negative_image_embeds.repeat_interleave(a , dim=0 )
lowercase_ : str = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(
dtype=prompt_embeds.dtype , device=a )
self.scheduler.set_timesteps(a , device=a )
lowercase_ : List[str] = self.scheduler.timesteps
lowercase_ : str = self.unet.config.in_channels
lowercase_ , lowercase_ : int = get_new_h_w(a , a , self.movq_scale_factor )
# create initial latent
lowercase_ : str = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , a , a , a , self.scheduler , )
for i, t in enumerate(self.progress_bar(a ) ):
# expand the latents if we are doing classifier free guidance
lowercase_ : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase_ : Optional[int] = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
lowercase_ : Optional[int] = self.unet(
sample=a , timestep=a , encoder_hidden_states=a , added_cond_kwargs=a , return_dict=a , )[0]
if do_classifier_free_guidance:
lowercase_ , lowercase_ : Union[str, Any] = noise_pred.split(latents.shape[1] , dim=1 )
lowercase_ , lowercase_ : Optional[Any] = noise_pred.chunk(2 )
lowercase_ , lowercase_ : Any = variance_pred.chunk(2 )
lowercase_ : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowercase_ : int = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowercase_ , lowercase_ : str = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowercase_ : Tuple = self.scheduler.step(
a , a , a , generator=a , ).prev_sample
# post-processing
lowercase_ : Union[str, Any] = self.movq.decode(a , force_not_quantize=a )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
lowercase_ : List[Any] = image * 0.5 + 0.5
lowercase_ : Optional[int] = image.clamp(0 , 1 )
lowercase_ : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowercase_ : List[str] = self.numpy_to_pil(a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a )
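The classifier-free-guidance branch in __call__ splits the UNet output into an epsilon half and a learned-variance half, guides only the epsilon, and re-attaches the conditional variance. A shape-level sketch with random stand-in tensors (guidance_scale 4.0 mirrors the default in the signature above):
import torch

latent_channels = 4
noise_pred = torch.randn(2, latent_channels * 2, 96, 96)  # [uncond, cond] batch
noise, variance_pred = noise_pred.split(latent_channels, dim=1)
noise_pred_uncond, noise_pred_text = noise.chunk(2)
_, variance_pred_text = variance_pred.chunk(2)
# Guide only the noise prediction; keep the conditional variance as-is.
guided = noise_pred_uncond + 4.0 * (noise_pred_text - noise_pred_uncond)
out = torch.cat([guided, variance_pred_text], dim=1)
assert out.shape == (1, latent_channels * 2, 96, 96)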
| 640
| 0
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
class _UpperCAmelCase :
def __init__( self : Any , a : int = 6 ) -> Optional[int]:
'''simple docstring'''
lowercase_ : Node | None = None
lowercase_ : Node | None = None
self.create_linked_list(a )
def lowerCAmelCase__ ( self : Tuple , a : int ) -> List[str]:
'''simple docstring'''
lowercase_ : Union[str, Any] = Node()
lowercase_ : Dict = current_node
lowercase_ : Optional[int] = current_node
lowercase_ : str = current_node
for _ in range(1 , a ):
lowercase_ : str = Node()
lowercase_ : Optional[int] = current_node
lowercase_ : Optional[Any] = previous_node
lowercase_ : List[str] = current_node
lowercase_ : Optional[int] = self.front
lowercase_ : Union[str, Any] = previous_node
def lowerCAmelCase__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
return (
self.front == self.rear
and self.front is not None
and self.front.data is None
)
def lowerCAmelCase__ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
self.check_can_perform_operation()
return self.front.data if self.front else None
def lowerCAmelCase__ ( self : List[Any] , a : Any ) -> List[Any]:
'''simple docstring'''
if self.rear is None:
return
self.check_is_full()
if not self.is_empty():
lowercase_ : Dict = self.rear.next
if self.rear:
lowercase_ : Union[str, Any] = data
def lowerCAmelCase__ ( self : str ) -> Any:
'''simple docstring'''
self.check_can_perform_operation()
if self.rear is None or self.front is None:
return None
if self.front == self.rear:
lowercase_ : List[Any] = self.front.data
lowercase_ : List[Any] = None
return data
lowercase_ : Optional[Any] = self.front
lowercase_ : Optional[int] = old_front.next
lowercase_ : int = old_front.data
lowercase_ : Optional[Any] = None
return data
def lowerCAmelCase__ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
if self.is_empty():
raise Exception("Empty Queue" )
def lowerCAmelCase__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
if self.rear and self.rear.next == self.front:
raise Exception("Full Queue" )
class _UpperCAmelCase :
def __init__( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
lowercase_ : Any | None = None
lowercase_ : Node | None = None
lowercase_ : Node | None = None
if __name__ == "__main__":
import doctest
doctest.testmod()
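Because the method names above are obfuscated, here is a compact, un-obfuscated re-implementation of the same circular-linked-list queue for reference (RingQueue and its method names are mine): the queue is full when rear.next is the front while rear still holds data, and empty when front is rear with no data.
class _Node:
    def __init__(self):
        self.data = None
        self.next = None


class RingQueue:
    def __init__(self, capacity=6):
        self.front = self.rear = _Node()
        node = self.front
        for _ in range(1, capacity):
            node.next = _Node()
            node = node.next
        node.next = self.front  # close the ring

    def is_empty(self):
        return self.front is self.rear and self.front.data is None

    def enqueue(self, item):
        if self.rear.next is self.front and self.rear.data is not None:
            raise Exception("Full Queue")
        if not self.is_empty():
            self.rear = self.rear.next
        self.rear.data = item

    def dequeue(self):
        if self.is_empty():
            raise Exception("Empty Queue")
        data, self.front.data = self.front.data, None
        if self.front is not self.rear:
            self.front = self.front.next
        return data


q = RingQueue(3)
q.enqueue("a")
q.enqueue("b")
assert q.dequeue() == "a" and q.dequeue() == "b" and q.is_empty()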
| 707
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
UpperCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCamelCase__ = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n'
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=8 ):
"""simple docstring"""
lowercase_ : Union[str, Any] = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
lowercase_ : Union[str, Any] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase=512 , _UpperCamelCase=512 ):
"""simple docstring"""
lowercase_ : Union[str, Any] = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
lowercase_ : str = np.array(pil_image.convert("RGB" ) )
lowercase_ : Optional[int] = arr.astype(np.floataa ) / 127.5 - 1
lowercase_ : int = np.transpose(_UpperCamelCase , [2, 0, 1] )
lowercase_ : str = torch.from_numpy(_UpperCamelCase ).unsqueeze(0 )
return image
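A quick check of the normalization prepare_image performs, assuming the obfuscated np.floataa stands in for np.float32: pixels are mapped from [0, 255] to roughly [-1, 1], so a mid-gray image lands near zero.
import numpy as np
import torch
from PIL import Image

img = Image.new("RGB", (4, 4), color=(128, 128, 128))
arr = np.array(img.convert("RGB")).astype(np.float32) / 127.5 - 1
tensor = torch.from_numpy(np.transpose(arr, [2, 0, 1])).unsqueeze(0)
assert tensor.shape == (1, 3, 4, 4)  # NCHW layout after the transpose
assert abs(tensor.mean().item()) < 0.01  # 128 / 127.5 - 1 ≈ 0.004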
class _UpperCAmelCase ( snake_case ):
def __init__( self : List[Any] , a : UNetaDConditionModel , a : DDPMScheduler , a : VQModel , ):
'''simple docstring'''
super().__init__()
self.register_modules(
unet=a , scheduler=a , movq=a , )
lowercase_ : Dict = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowerCAmelCase__ ( self : Union[str, Any] , a : Tuple , a : List[str] , a : List[Any] ):
'''simple docstring'''
lowercase_ : Dict = min(int(num_inference_steps * strength ) , a )
lowercase_ : str = max(num_inference_steps - init_timestep , 0 )
lowercase_ : Tuple = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def lowerCAmelCase__ ( self : Union[str, Any] , a : int , a : List[Any] , a : Tuple , a : Union[str, Any] , a : int , a : Tuple , a : Optional[Any]=None ):
'''simple docstring'''
if not isinstance(a , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(a )}""" )
lowercase_ : str = image.to(device=a , dtype=a )
lowercase_ : Any = batch_size * num_images_per_prompt
if image.shape[1] == 4:
lowercase_ : str = image
else:
if isinstance(a , a ) and len(a ) != batch_size:
raise ValueError(
f"""You have passed a list of generators of length {len(a )}, but requested an effective batch"""
f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
elif isinstance(a , a ):
lowercase_ : str = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(a )
]
lowercase_ : List[Any] = torch.cat(a , dim=0 )
else:
lowercase_ : Tuple = self.movq.encode(a ).latent_dist.sample(a )
lowercase_ : Union[str, Any] = self.movq.config.scaling_factor * init_latents
lowercase_ : Tuple = torch.cat([init_latents] , dim=0 )
lowercase_ : List[Any] = init_latents.shape
lowercase_ : Union[str, Any] = randn_tensor(a , generator=a , device=a , dtype=a )
# get latents
lowercase_ : Dict = self.scheduler.add_noise(a , a , a )
lowercase_ : Tuple = init_latents
return latents
def lowerCAmelCase__ ( self : List[Any] , a : str=0 ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowercase_ : Optional[Any] = torch.device(f"""cuda:{gpu_id}""" )
lowercase_ : Optional[Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(a , a )
def lowerCAmelCase__ ( self : Optional[int] , a : Union[str, Any]=0 ):
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
lowercase_ : Any = torch.device(f"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=a )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowercase_ : Optional[int] = None
for cpu_offloaded_model in [self.unet, self.movq]:
lowercase_ , lowercase_ : Union[str, Any] = cpu_offload_with_hook(a , a , prev_module_hook=a )
# We'll offload the last model manually.
lowercase_ : List[Any] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(a , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(a )
def __call__( self : Optional[int] , a : Union[torch.FloatTensor, List[torch.FloatTensor]] , a : Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] , a : Union[torch.FloatTensor, List[torch.FloatTensor]] , a : int = 5_1_2 , a : int = 5_1_2 , a : int = 1_0_0 , a : float = 4.0 , a : float = 0.3 , a : int = 1 , a : Optional[Union[torch.Generator, List[torch.Generator]]] = None , a : Optional[str] = "pil" , a : bool = True , ):
'''simple docstring'''
lowercase_ : Optional[int] = self._execution_device
lowercase_ : Dict = guidance_scale > 1.0
if isinstance(a , a ):
lowercase_ : Dict = torch.cat(a , dim=0 )
lowercase_ : Dict = image_embeds.shape[0]
if isinstance(a , a ):
lowercase_ : str = torch.cat(a , dim=0 )
if do_classifier_free_guidance:
lowercase_ : Optional[Any] = image_embeds.repeat_interleave(a , dim=0 )
lowercase_ : int = negative_image_embeds.repeat_interleave(a , dim=0 )
lowercase_ : int = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=a )
if not isinstance(a , a ):
lowercase_ : List[Any] = [image]
if not all(isinstance(a , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
f"""Input is in incorrect format: {[type(a ) for i in image]}. Currently, we only support PIL image and pytorch tensor""" )
lowercase_ : List[Any] = torch.cat([prepare_image(a , a , a ) for i in image] , dim=0 )
lowercase_ : List[Any] = image.to(dtype=image_embeds.dtype , device=a )
lowercase_ : Optional[int] = self.movq.encode(a )["latents"]
lowercase_ : Dict = latents.repeat_interleave(a , dim=0 )
self.scheduler.set_timesteps(a , device=a )
lowercase_ , lowercase_ : List[Any] = self.get_timesteps(a , a , a )
lowercase_ : Tuple = timesteps[:1].repeat(batch_size * num_images_per_prompt )
lowercase_ , lowercase_ : Optional[Any] = downscale_height_and_width(a , a , self.movq_scale_factor )
lowercase_ : Tuple = self.prepare_latents(
a , a , a , a , image_embeds.dtype , a , a )
for i, t in enumerate(self.progress_bar(a ) ):
# expand the latents if we are doing classifier free guidance
lowercase_ : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase_ : int = {"image_embeds": image_embeds}
lowercase_ : Optional[int] = self.unet(
sample=a , timestep=a , encoder_hidden_states=a , added_cond_kwargs=a , return_dict=a , )[0]
if do_classifier_free_guidance:
lowercase_ , lowercase_ : List[Any] = noise_pred.split(latents.shape[1] , dim=1 )
lowercase_ , lowercase_ : int = noise_pred.chunk(2 )
lowercase_ , lowercase_ : Any = variance_pred.chunk(2 )
lowercase_ : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowercase_ : Optional[Any] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowercase_ , lowercase_ : List[Any] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowercase_ : Dict = self.scheduler.step(
a , a , a , generator=a , )[0]
# post-processing
lowercase_ : Optional[Any] = self.movq.decode(a , force_not_quantize=a )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
lowercase_ : Tuple = image * 0.5 + 0.5
lowercase_ : Any = image.clamp(0 , 1 )
lowercase_ : Any = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowercase_ : Tuple = self.numpy_to_pil(a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a )
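The get_timesteps helper above trims the schedule by strength before the denoising loop runs; a worked example with the values from the usage example in the docstring (100 steps, strength 0.2):
num_inference_steps, strength = 100, 0.2
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
t_start = max(num_inference_steps - init_timestep, 0)
# Only the final 20 of the 100 scheduled steps are executed.
assert (t_start, num_inference_steps - t_start) == (80, 20)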
| 640
| 0
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase__ = logging.get_logger(__name__)
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase=False , _UpperCamelCase=False ):
"""simple docstring"""
lowercase_ : Any = "backbone." if is_semantic else ""
lowercase_ : Optional[int] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""{prefix}blocks.{i}.norm1.weight""", F"""beit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""{prefix}blocks.{i}.norm1.bias""", F"""beit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""{prefix}blocks.{i}.attn.proj.weight""", F"""beit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""{prefix}blocks.{i}.attn.proj.bias""", F"""beit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""{prefix}blocks.{i}.norm2.weight""", F"""beit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""{prefix}blocks.{i}.norm2.bias""", F"""beit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""{prefix}blocks.{i}.mlp.fc1.weight""", F"""beit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""{prefix}blocks.{i}.mlp.fc1.bias""", F"""beit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""{prefix}blocks.{i}.mlp.fc2.weight""", F"""beit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""{prefix}blocks.{i}.mlp.fc2.bias""", F"""beit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
(F"""{prefix}cls_token""", "beit.embeddings.cls_token"),
(F"""{prefix}patch_embed.proj.weight""", "beit.embeddings.patch_embeddings.projection.weight"),
(F"""{prefix}patch_embed.proj.bias""", "beit.embeddings.patch_embeddings.projection.bias"),
(F"""{prefix}pos_embed""", "beit.embeddings.position_embeddings"),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("mask_token", "beit.embeddings.mask_token"),
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("fc_norm.weight", "beit.pooler.layernorm.weight"),
("fc_norm.bias", "beit.pooler.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=False , _UpperCamelCase=False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
lowercase_ : List[str] = "backbone." if is_semantic else ""
# queries, keys and values
lowercase_ : List[Any] = state_dict.pop(F"""{prefix}blocks.{i}.attn.qkv.weight""" )
lowercase_ : Dict = state_dict.pop(F"""{prefix}blocks.{i}.attn.q_bias""" )
lowercase_ : Optional[int] = state_dict.pop(F"""{prefix}blocks.{i}.attn.v_bias""" )
lowercase_ : Any = in_proj_weight[
: config.hidden_size, :
]
lowercase_ : Tuple = q_bias
lowercase_ : Optional[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase_ : Optional[Any] = in_proj_weight[
-config.hidden_size :, :
]
lowercase_ : int = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
lowercase_ : Tuple = state_dict.pop(F"""{prefix}blocks.{i}.gamma_1""" )
lowercase_ : List[Any] = state_dict.pop(F"""{prefix}blocks.{i}.gamma_2""" )
lowercase_ : Any = gamma_a
lowercase_ : List[str] = gamma_a
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : int = dct.pop(_UpperCamelCase )
lowercase_ : Optional[Any] = val
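rename_key is just dict surgery: pop the old key and re-insert its value under the new name. A minimal illustration on a plain dict, using one of the key pairs produced by create_rename_keys above:
state_dict = {"blocks.0.norm1.weight": 1.0}
old, new = "blocks.0.norm1.weight", "beit.encoder.layer.0.layernorm_before.weight"
state_dict[new] = state_dict.pop(old)
assert list(state_dict) == ["beit.encoder.layer.0.layernorm_before.weight"]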
def __SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
lowercase_ : Optional[int] = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowercase_ : Optional[int] = Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw )
return im
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=False ):
"""simple docstring"""
lowercase_ : List[Any] = False if "rvlcdip" in checkpoint_url else True
lowercase_ : Optional[int] = BeitConfig(use_absolute_position_embeddings=_UpperCamelCase , use_mask_token=_UpperCamelCase )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
lowercase_ : Union[str, Any] = 1024
lowercase_ : Union[str, Any] = 4096
lowercase_ : List[str] = 24
lowercase_ : Optional[int] = 16
# labels
if "rvlcdip" in checkpoint_url:
lowercase_ : Any = 16
lowercase_ : int = "huggingface/label-files"
lowercase_ : Union[str, Any] = "rvlcdip-id2label.json"
lowercase_ : int = json.load(open(hf_hub_download(_UpperCamelCase , _UpperCamelCase , repo_type="dataset" ) , "r" ) )
lowercase_ : Tuple = {int(_UpperCamelCase ): v for k, v in idalabel.items()}
lowercase_ : Dict = idalabel
lowercase_ : Union[str, Any] = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
lowercase_ : Optional[Any] = torch.hub.load_state_dict_from_url(_UpperCamelCase , map_location="cpu" )["model"]
lowercase_ : Any = create_rename_keys(_UpperCamelCase , has_lm_head=_UpperCamelCase )
for src, dest in rename_keys:
rename_key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
read_in_q_k_v(_UpperCamelCase , _UpperCamelCase , has_lm_head=_UpperCamelCase )
# load HuggingFace model
lowercase_ : List[Any] = BeitForMaskedImageModeling(_UpperCamelCase ) if has_lm_head else BeitForImageClassification(_UpperCamelCase )
model.eval()
model.load_state_dict(_UpperCamelCase )
# Check outputs on an image
lowercase_ : str = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=_UpperCamelCase )
lowercase_ : Union[str, Any] = prepare_img()
lowercase_ : Optional[Any] = image_processor(images=_UpperCamelCase , return_tensors="pt" )
lowercase_ : Any = encoding["pixel_values"]
lowercase_ : Any = model(_UpperCamelCase )
lowercase_ : Any = outputs.logits
# verify logits
lowercase_ : Dict = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
assert logits.shape == torch.Size(_UpperCamelCase ), "Shape of logits not as expected"
Path(_UpperCamelCase ).mkdir(exist_ok=_UpperCamelCase )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(_UpperCamelCase )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_UpperCamelCase )
if push_to_hub:
if has_lm_head:
lowercase_ : Any = "dit-base" if "base" in checkpoint_url else "dit-large"
else:
lowercase_ : Tuple = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
image_processor.push_to_hub(
repo_path_or_name=Path(_UpperCamelCase , _UpperCamelCase ) , organization="nielsr" , commit_message="Add image processor" , use_temp_dir=_UpperCamelCase , )
model.push_to_hub(
repo_path_or_name=Path(_UpperCamelCase , _UpperCamelCase ) , organization="nielsr" , commit_message="Add model" , use_temp_dir=_UpperCamelCase , )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_url',
default='https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth',
type=str,
help='URL to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
UpperCamelCase__ = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 708
|
'''simple docstring'''
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: str = ['image_processor', 'tokenizer']
__lowerCamelCase: Dict = 'Pix2StructImageProcessor'
__lowerCamelCase: Union[str, Any] = ('T5Tokenizer', 'T5TokenizerFast')
def __init__( self : str , a : Dict , a : List[str] ):
'''simple docstring'''
lowercase_ : Optional[Any] = False
super().__init__(a , a )
def __call__( self : Tuple , a : int=None , a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , a : bool = True , a : Union[bool, str, PaddingStrategy] = False , a : Union[bool, str, TruncationStrategy] = None , a : Optional[int] = None , a : Optional[int] = 2_0_4_8 , a : int = 0 , a : Optional[int] = None , a : Optional[bool] = None , a : bool = False , a : bool = False , a : bool = False , a : bool = False , a : bool = False , a : bool = True , a : Optional[Union[str, TensorType]] = None , **a : List[str] , ):
'''simple docstring'''
if images is None and text is None:
raise ValueError("You have to specify either images or text." )
# Get only text
if images is None and not self.image_processor.is_vqa:
lowercase_ : Dict = self.tokenizer
lowercase_ : Tuple = self.tokenizer(
text=a , add_special_tokens=a , padding=a , truncation=a , max_length=a , stride=a , pad_to_multiple_of=a , return_attention_mask=a , return_overflowing_tokens=a , return_special_tokens_mask=a , return_offsets_mapping=a , return_token_type_ids=a , return_length=a , verbose=a , return_tensors=a , **a , )
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
lowercase_ : Optional[int] = self.image_processor(
a , return_tensors=a , max_patches=a , **a )
else:
# add pixel_values and bbox
lowercase_ : Any = self.image_processor(
a , return_tensors=a , max_patches=a , header_text=a , **a )
if text is not None and not self.image_processor.is_vqa:
lowercase_ : int = self.tokenizer(
text=a , add_special_tokens=a , padding=a , truncation=a , max_length=a , stride=a , pad_to_multiple_of=a , return_attention_mask=a , return_overflowing_tokens=a , return_special_tokens_mask=a , return_offsets_mapping=a , return_token_type_ids=a , return_length=a , verbose=a , return_tensors=a , **a , )
if "attention_mask" in text_encoding:
lowercase_ : str = text_encoding.pop("attention_mask" )
if "input_ids" in text_encoding:
lowercase_ : Dict = text_encoding.pop("input_ids" )
else:
lowercase_ : str = None
if text_encoding is not None:
encoding_image_processor.update(a )
return encoding_image_processor
def lowerCAmelCase__ ( self : Any , *a : str , **a : Tuple ):
'''simple docstring'''
return self.tokenizer.batch_decode(*a , **a )
def lowerCAmelCase__ ( self : str , *a : Optional[int] , **a : Any ):
'''simple docstring'''
return self.tokenizer.decode(*a , **a )
@property
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
lowercase_ : Tuple = self.tokenizer.model_input_names
lowercase_ : Dict = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
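In the VQA branch of __call__ above, the text encoding's keys are renamed before being merged into the image features (attention_mask becomes decoder_attention_mask, input_ids becomes decoder_input_ids). A minimal stand-in for that dict handling, with a placeholder in place of real image features:
text_encoding = {"input_ids": [[0, 5, 2]], "attention_mask": [[1, 1, 1]]}
encoding = {"flattened_patches": "..."}  # placeholder for the image processor output
encoding["decoder_attention_mask"] = text_encoding.pop("attention_mask")
encoding["decoder_input_ids"] = text_encoding.pop("input_ids")
assert sorted(encoding) == ["decoder_attention_mask", "decoder_input_ids", "flattened_patches"]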
| 640
| 0
|
'''simple docstring'''
import heapq
import sys
import numpy as np
UpperCamelCase__ = tuple[int, int]
class _UpperCAmelCase :
def __init__( self : Optional[int] ):
'''simple docstring'''
lowercase_ : List[str] = []
lowercase_ : List[str] = set()
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
if not self.empty():
return self.elements[0][0]
else:
return float("inf" )
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
return len(self.elements ) == 0
def lowerCAmelCase__ ( self : Any , a : List[Any] , a : Tuple ):
'''simple docstring'''
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(a )
else:
# update
# print("update", item)
lowercase_ : int = []
lowercase_ , lowercase_ : Any = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
lowercase_ , lowercase_ : List[Any] = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def lowerCAmelCase__ ( self : Optional[int] , a : List[str] ):
'''simple docstring'''
if item in self.set:
self.set.remove(a )
lowercase_ : Optional[Any] = []
lowercase_ , lowercase_ : List[Any] = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
lowercase_ , lowercase_ : Dict = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
return self.elements[0][1]
def lowerCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ , lowercase_ : Optional[Any] = heapq.heappop(self.elements )
self.set.remove(a )
return (priority, item)
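The class above keeps a heap plus a membership set so that put can either push a new item or re-prioritize an existing one. Since its method names are obfuscated, this sketch shows the push-or-skip half of that semantics with heapq directly:
import heapq

elements, members = [], set()


def put(item, priority):
    # Push only unseen items; the real class reheapifies on duplicates instead.
    if item not in members:
        heapq.heappush(elements, (priority, item))
        members.add(item)


put((0, 0), 5.0)
put((1, 1), 2.0)
put((0, 0), 9.0)  # already present: a real update would rebuild the heap entry
assert elements[0] == (2.0, (1, 1))  # the top of the heap is the minimum key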
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : int = np.array(_UpperCamelCase )
lowercase_ : Optional[int] = np.array(_UpperCamelCase )
return np.linalg.norm(a - b )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
return consistent_heuristic(_UpperCamelCase , _UpperCamelCase ) // t
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : List[Any] = g_function[start] + Wa * heuristics[i](_UpperCamelCase , _UpperCamelCase )
return ans
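The key function computes g(s) + W1 * h_i(s, goal); a worked example assuming the anchor weight W1 = 1, matching the hyperparameters set below, with the Manhattan heuristic, start (0, 0), and goal (19, 19):
g_function = {(0, 0): 0}
manhattan = lambda p, goal: abs(p[0] - goal[0]) + abs(p[1] - goal[1])
assert g_function[(0, 0)] + 1 * manhattan((0, 0), (19, 19)) == 38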
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Dict = np.chararray((n, n) )
for i in range(_UpperCamelCase ):
for j in range(_UpperCamelCase ):
lowercase_ : List[str] = "*"
for i in range(_UpperCamelCase ):
for j in range(_UpperCamelCase ):
if (j, (n - 1) - i) in blocks:
lowercase_ : int = "#"
lowercase_ : Optional[int] = "-"
lowercase_ : List[Any] = back_pointer[goal]
while x != start:
lowercase_ , lowercase_ : Any = x
# print(x)
lowercase_ : Dict = "-"
lowercase_ : str = back_pointer[x]
lowercase_ : Any = "-"
for i in range(_UpperCamelCase ):
for j in range(_UpperCamelCase ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=" " )
print("<-- End position" , end=" " )
else:
print(grid[i][j] , end=" " )
print()
print("^" )
print("Start position" )
print()
print("# is an obstacle" )
print("- is the path taken by algorithm" )
print("PATH TAKEN BY THE ALGORITHM IS:-" )
lowercase_ : Dict = back_pointer[goal]
while x != start:
print(_UpperCamelCase , end=" " )
lowercase_ : Dict = back_pointer[x]
print(_UpperCamelCase )
sys.exit()
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ):
"""simple docstring"""
for itera in range(_UpperCamelCase ):
open_list[itera].remove_element(_UpperCamelCase )
# print("s", s)
# print("j", j)
lowercase_ , lowercase_ : Dict = s
lowercase_ : Union[str, Any] = (x - 1, y)
lowercase_ : Tuple = (x + 1, y)
lowercase_ : str = (x, y + 1)
lowercase_ : Dict = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(_UpperCamelCase ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(_UpperCamelCase )
lowercase_ : Union[str, Any] = -1
lowercase_ : List[str] = float("inf" )
if valid(_UpperCamelCase ) and g_function[neighbours] > g_function[s] + 1:
lowercase_ : Dict = g_function[s] + 1
lowercase_ : Optional[Any] = s
if neighbours not in close_list_anchor:
open_list[0].put(_UpperCamelCase , key(_UpperCamelCase , 0 , _UpperCamelCase , _UpperCamelCase ) )
if neighbours not in close_list_inad:
for var in range(1 , _UpperCamelCase ):
if key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) <= Wa * key(
_UpperCamelCase , 0 , _UpperCamelCase , _UpperCamelCase ):
open_list[j].put(
_UpperCamelCase , key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) )
def __SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
lowercase_ : Any = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
UpperCamelCase__ = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
UpperCamelCase__ = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
UpperCamelCase__ = make_common_ground()
UpperCamelCase__ = blocks_blk
# hyper parameters
UpperCamelCase__ = 1
UpperCamelCase__ = 1
UpperCamelCase__ = 20
UpperCamelCase__ = 3 # one consistent and two other inconsistent
# start and end destination
UpperCamelCase__ = (0, 0)
UpperCamelCase__ = (n - 1, n - 1)
UpperCamelCase__ = 1
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : List[str] = {start: 0, goal: float("inf" )}
lowercase_ : Any = {start: -1, goal: -1}
lowercase_ : List[Any] = []
lowercase_ : Dict = set()
for i in range(_UpperCamelCase ):
open_list.append(PriorityQueue() )
open_list[i].put(_UpperCamelCase , key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) )
lowercase_ : list[int] = []
lowercase_ : list[int] = []
while open_list[0].minkey() < float("inf" ):
for i in range(1 , _UpperCamelCase ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float("inf" ):
do_something(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
else:
lowercase_ : int = open_list[i].top_show()
visited.add(_UpperCamelCase )
expand_state(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , )
close_list_inad.append(_UpperCamelCase )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float("inf" ):
do_something(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
else:
lowercase_ : List[str] = open_list[0].top_show()
visited.add(_UpperCamelCase )
expand_state(
_UpperCamelCase , 0 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , )
close_list_anchor.append(_UpperCamelCase )
print("No path found to goal" )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(_UpperCamelCase ):
if (j, i) in blocks:
print("#" , end=" " )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print("*" , end=" " )
else:
print("-" , end=" " )
else:
print("*" , end=" " )
if (j, i) == (n - 1, n - 1):
print("<-- End position" , end=" " )
print()
print("^" )
print("Start position" )
print()
print("# is an obstacle" )
print("- is the path taken by algorithm" )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 709
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _UpperCAmelCase ( snake_case , unittest.TestCase ):
__lowerCamelCase: Dict = KandinskyVaaPriorPipeline
__lowerCamelCase: Optional[int] = ['prompt']
__lowerCamelCase: Any = ['prompt', 'negative_prompt']
__lowerCamelCase: List[Any] = [
'num_images_per_prompt',
'generator',
'num_inference_steps',
'latents',
'negative_prompt',
'guidance_scale',
'output_type',
'return_dict',
]
__lowerCamelCase: List[Any] = False
@property
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
return 3_2
@property
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
return 3_2
@property
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
return self.time_input_dim
@property
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def lowerCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
return 1_0_0
@property
def lowerCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
lowercase_ : Any = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
return tokenizer
@property
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModelWithProjection(a )
@property
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ : List[str] = {
"num_attention_heads": 2,
"attention_head_dim": 1_2,
"embedding_dim": self.text_embedder_hidden_size,
"num_layers": 1,
}
lowercase_ : Union[str, Any] = PriorTransformer(**a )
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
lowercase_ : List[Any] = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ : Dict = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=2_2_4 , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1_4 , )
lowercase_ : Optional[Any] = CLIPVisionModelWithProjection(a )
return model
@property
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
lowercase_ : List[str] = CLIPImageProcessor(
crop_size=2_2_4 , do_center_crop=a , do_normalize=a , do_resize=a , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=2_2_4 , )
return image_processor
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
lowercase_ : Any = self.dummy_prior
lowercase_ : Optional[Any] = self.dummy_image_encoder
lowercase_ : List[Any] = self.dummy_text_encoder
lowercase_ : Any = self.dummy_tokenizer
lowercase_ : Optional[Any] = self.dummy_image_processor
lowercase_ : List[str] = UnCLIPScheduler(
variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1_0_0_0 , clip_sample=a , clip_sample_range=10.0 , )
lowercase_ : List[Any] = {
"prior": prior,
"image_encoder": image_encoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"scheduler": scheduler,
"image_processor": image_processor,
}
return components
def lowerCAmelCase__ ( self : Any , a : Dict , a : Dict=0 ):
'''simple docstring'''
if str(a ).startswith("mps" ):
lowercase_ : int = torch.manual_seed(a )
else:
lowercase_ : Optional[Any] = torch.Generator(device=a ).manual_seed(a )
lowercase_ : Any = {
"prompt": "horse",
"generator": generator,
"guidance_scale": 4.0,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
def lowerCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ : str = "cpu"
lowercase_ : Any = self.get_dummy_components()
lowercase_ : int = self.pipeline_class(**a )
lowercase_ : Any = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowercase_ : Any = pipe(**self.get_dummy_inputs(a ) )
lowercase_ : List[Any] = output.image_embeds
lowercase_ : str = pipe(
**self.get_dummy_inputs(a ) , return_dict=a , )[0]
lowercase_ : Any = image[0, -1_0:]
lowercase_ : Dict = image_from_tuple[0, -1_0:]
assert image.shape == (1, 3_2)
lowercase_ : int = np.array(
[-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ : int = torch_device == "cpu"
lowercase_ : Tuple = True
lowercase_ : str = False
self._test_inference_batch_single_identical(
test_max_difference=a , relax_max_difference=a , test_mean_pixel_difference=a , )
@skip_mps
def lowerCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ : Any = torch_device == "cpu"
lowercase_ : int = False
self._test_attention_slicing_forward_pass(
test_max_difference=a , test_mean_pixel_difference=a , )
| 640
| 0
|
'''simple docstring'''
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase__ = [
'word_embeddings_layernorm.weight',
'word_embeddings_layernorm.bias',
'input_layernorm.weight',
'input_layernorm.bias',
'post_attention_layernorm.weight',
'post_attention_layernorm.bias',
'self_attention.dense.bias',
'mlp.dense_4h_to_h.bias',
'ln_f.weight',
'ln_f.bias',
]
UpperCamelCase__ = [
'mlp.dense_4h_to_h.weight',
'self_attention.dense.weight',
]
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Union[str, Any] = {
"word_embeddings.weight": "word_embeddings.weight",
"word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
"word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
"weight": "ln_f.weight",
"bias": "ln_f.bias",
}
if key in layer_rename_map:
return layer_rename_map[key]
# Handle transformer blocks
lowercase_ : Union[str, Any] = int(re.match(R".*layer_(\d*).*" , _UpperCamelCase )[1] )
layer_number -= 3
return F"""h.{layer_number}.""" + key
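The mapping above shifts Megatron's shard numbering down by 3 so that transformer-block weights land on h.<i>.* keys; a quick check (the shard file name format here is an assumption, based on the "layer" prefix the script filters for):
import re

key, file = "input_layernorm.weight", "layer_04-model_00-model_states.pt"
layer_number = int(re.match(r".*layer_(\d*).*", file)[1]) - 3
assert f"h.{layer_number}." + key == "h.1.input_layernorm.weight"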
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
if dtype == torch.bool:
return 1 / 8
lowercase_ : List[str] = re.search(R"[^\d](\d+)$" , str(_UpperCamelCase ) )
if bit_search is None:
raise ValueError(F"""`dtype` is not a valid dtype: {dtype}.""" )
lowercase_ : Union[str, Any] = int(bit_search.groups()[0] )
return bit_size // 8
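A readable restatement of the dtype-size helper above: parse the trailing bit count out of the dtype's string form and convert it to bytes, with bool special-cased as one bit.
import re

import torch


def dtype_size_bytes(dtype):
    if dtype == torch.bool:
        return 1 / 8
    # e.g. "torch.float16" -> 16 bits -> 2 bytes
    bits = int(re.search(r"[^\d](\d+)$", str(dtype)).groups()[0])
    return bits // 8


assert dtype_size_bytes(torch.float16) == 2
assert dtype_size_bytes(torch.int64) == 8
assert dtype_size_bytes(torch.bool) == 1 / 8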
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
if bloom_config_file == "":
lowercase_ : Tuple = BloomConfig()
else:
lowercase_ : str = BloomConfig.from_json_file(_UpperCamelCase )
if shard_model:
lowercase_ : int = os.listdir(_UpperCamelCase )
lowercase_ : Union[str, Any] = sorted(filter(lambda s : s.startswith("layer" ) and "model_00" in s , _UpperCamelCase ) )
lowercase_ : str = {"weight_map": {}, "metadata": {}}
lowercase_ : Any = 0
lowercase_ : str = None
lowercase_ : Any = BloomConfig()
for j, file in enumerate(_UpperCamelCase ):
print("Processing file: {}".format(_UpperCamelCase ) )
lowercase_ : int = None
for i in range(_UpperCamelCase ):
# load all TP files
lowercase_ : Union[str, Any] = file.replace("model_00" , F"""model_0{i}""" )
lowercase_ : Dict = torch.load(os.path.join(_UpperCamelCase , _UpperCamelCase ) , map_location="cpu" )
# Rename keys in the transformers names
lowercase_ : str = list(temp.keys() )
for key in keys:
lowercase_ : Tuple = temp.pop(_UpperCamelCase )
if tensors is None:
lowercase_ : Union[str, Any] = temp
else:
for key in tensors.keys():
if any(key.endswith(_UpperCamelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
# We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
lowercase_ : Union[str, Any] = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights across TP ranks
lowercase_ : Union[str, Any] = torch.cat([tensors[key], temp[key]] , dim=_UpperCamelCase )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(_UpperCamelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
lowercase_ : str = tensors[key] / pretraining_tp
torch.save(
_UpperCamelCase , os.path.join(
_UpperCamelCase , "pytorch_model_{}-of-{}.bin".format(str(j + 1 ).zfill(5 ) , str(len(_UpperCamelCase ) ).zfill(5 ) ) , ) , )
for key in tensors.keys():
lowercase_ : str = tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
lowercase_ : Any = "pytorch_model_{}-of-{}.bin".format(
str(j + 1 ).zfill(5 ) , str(len(_UpperCamelCase ) ).zfill(5 ) )
lowercase_ : int = BloomConfig()
lowercase_ : List[Any] = pytorch_dump_folder_path + "/" + CONFIG_NAME
lowercase_ : Dict = total_size
with open(_UpperCamelCase , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
with open(os.path.join(_UpperCamelCase , WEIGHTS_NAME + ".index.json" ) , "w" , encoding="utf-8" ) as f:
lowercase_ : Any = json.dumps(_UpperCamelCase , indent=2 , sort_keys=_UpperCamelCase ) + "\n"
f.write(_UpperCamelCase )
else:
lowercase_ : Tuple = BloomModel(_UpperCamelCase )
lowercase_ : Any = os.listdir(_UpperCamelCase )
lowercase_ : Dict = sorted(filter(lambda s : s.startswith("layer" ) and "model_00" in s , _UpperCamelCase ) )
lowercase_ : List[Any] = None
for i, file in enumerate(_UpperCamelCase ):
lowercase_ : int = None
for i in range(_UpperCamelCase ):
# load all TP files
lowercase_ : List[Any] = file.replace("model_00" , F"""model_0{i}""" )
lowercase_ : List[Any] = torch.load(os.path.join(_UpperCamelCase , _UpperCamelCase ) , map_location="cpu" )
# Rename keys in the transformers names
lowercase_ : int = list(temp.keys() )
for key in keys:
lowercase_ : Optional[Any] = temp.pop(_UpperCamelCase )
if tensors is None:
lowercase_ : List[Any] = temp
else:
for key in tensors.keys():
# We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
if any(key.endswith(_UpperCamelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
lowercase_ : Dict = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights across TP ranks
lowercase_ : Any = torch.cat([tensors[key], temp[key]] , dim=_UpperCamelCase )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(_UpperCamelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
lowercase_ : Any = tensors[key] / pretraining_tp
lowercase_ : int = model.load_state_dict(_UpperCamelCase , strict=_UpperCamelCase )
assert not other_keys.unexpected_keys, F"""The keys {other_keys.unexpected_keys} are unexpected"""
if missing_keys is None:
lowercase_ : List[str] = set(other_keys.missing_keys )
else:
lowercase_ : int = missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, F"""The keys {missing_keys} are missing"""
# Save pytorch-model
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
lowercase_ : int = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
lowercase_ : str = pytorch_dump_folder_path + "/" + CONFIG_NAME
print(F"""Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}""" )
if config.torch_dtype is not None:
lowercase_ : List[str] = model.to(config.torch_dtype )
torch.save(model.state_dict() , _UpperCamelCase )
print(F"""Save configuration file to {pytorch_config_dump_path}""" )
with open(_UpperCamelCase , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--bloom_checkpoint_path',
default=None,
type=str,
required=True,
help='Path to the Megatron-LM checkpoint path.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--bloom_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--shard_model',
action='store_true',
help='An optional setting to shard the output model \nThis enables sharding the converted checkpoint',
)
parser.add_argument(
'--pretraining_tp',
default=4,
type=int,
help='Pretraining TP rank that has been used when training the model in Megatron-LM \n',
)
UpperCamelCase__ = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
| 710
|
'''simple docstring'''
from __future__ import annotations
UpperCamelCase__ = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
UpperCamelCase__ = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : str = []
lowercase_ : List[str] = len(_UpperCamelCase )
for i in range(_UpperCamelCase ):
lowercase_ : float = -1
for j in range(i + 1 , _UpperCamelCase ):
if arr[i] < arr[j]:
lowercase_ : Union[str, Any] = arr[j]
break
result.append(_UpperCamelCase )
return result
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : List[str] = []
for i, outer in enumerate(_UpperCamelCase ):
lowercase_ : float = -1
for inner in arr[i + 1 :]:
if outer < inner:
lowercase_ : Optional[Any] = inner
break
result.append(_UpperCamelCase )
return result
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : List[str] = len(_UpperCamelCase )
lowercase_ : list[float] = []
lowercase_ : list[float] = [-1] * arr_size
for index in reversed(range(_UpperCamelCase ) ):
if stack:
while stack[-1] <= arr[index]:
stack.pop()
if not stack:
break
if stack:
lowercase_ : Optional[Any] = stack[-1]
stack.append(arr[index] )
return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
print(
'next_greatest_element_slow():',
timeit('next_greatest_element_slow(arr)', setup=setup),
)
print(
'next_greatest_element_fast():',
timeit('next_greatest_element_fast(arr)', setup=setup),
)
print(
' next_greatest_element():',
timeit('next_greatest_element(arr)', setup=setup),
)
'''simple docstring'''
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
RANDOM_BERT = 'hf-internal-testing/tiny-random-bert'
CACHE_DIR = os.path.join(TRANSFORMERS_CACHE, 'models--hf-internal-testing--tiny-random-bert')
FULL_COMMIT_HASH = '9b8c223d42b2188cb49d29af482996f9d0f3e5a6'
class _UpperCAmelCase ( unittest.TestCase ):
def lowerCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ : str = cached_file(a , a )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(a ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(a , a ) ) )
with open(os.path.join(a , "refs" , "main" ) ) as f:
lowercase_ : List[Any] = f.read()
self.assertEqual(a , os.path.join(a , "snapshots" , a , a ) )
self.assertTrue(os.path.isfile(a ) )
# File is cached at the same place the second time.
lowercase_ : List[str] = cached_file(a , a )
self.assertEqual(a , a )
# Using a specific revision to test the full commit hash.
lowercase_ : int = cached_file(a , a , revision="9b8c223" )
self.assertEqual(a , os.path.join(a , "snapshots" , a , a ) )
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
with self.assertRaisesRegex(a , "is not a valid model identifier" ):
lowercase_ : Any = cached_file("tiny-random-bert" , a )
with self.assertRaisesRegex(a , "is not a valid git identifier" ):
lowercase_ : Tuple = cached_file(a , a , revision="aaaa" )
with self.assertRaisesRegex(a , "does not appear to have a file named" ):
lowercase_ : List[Any] = cached_file(a , "conf" )
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
with self.assertRaisesRegex(a , "does not appear to have a file named" ):
lowercase_ : Optional[Any] = cached_file(a , "conf" )
with open(os.path.join(a , "refs" , "main" ) ) as f:
lowercase_ : Optional[int] = f.read()
self.assertTrue(os.path.isfile(os.path.join(a , ".no_exist" , a , "conf" ) ) )
lowercase_ : int = cached_file(a , "conf" , _raise_exceptions_for_missing_entries=a )
self.assertIsNone(a )
lowercase_ : List[Any] = cached_file(a , "conf" , local_files_only=a , _raise_exceptions_for_missing_entries=a )
self.assertIsNone(a )
lowercase_ : int = mock.Mock()
lowercase_ : Tuple = 5_0_0
lowercase_ : Tuple = {}
lowercase_ : List[Any] = HTTPError
lowercase_ : Optional[int] = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.Session.request" , return_value=a ) as mock_head:
lowercase_ : Union[str, Any] = cached_file(a , "conf" , _raise_exceptions_for_connection_errors=a )
self.assertIsNone(a )
# This check we did call the fake head request
mock_head.assert_called()
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only" , a ) )
self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only" , a ) )
self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only" , a ) )
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
self.assertIsNone(get_file_from_repo("bert-base-cased" , "ahah.txt" ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(a , "is not a valid model identifier" ):
get_file_from_repo("bert-base-case" , a )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(a , "is not a valid git identifier" ):
get_file_from_repo("bert-base-cased" , a , revision="ahaha" )
lowercase_ : Optional[int] = get_file_from_repo("bert-base-cased" , a )
# The name is the cached name which is not very easy to test, so instead we load the content.
lowercase_ : Dict = json.loads(open(a , "r" ).read() )
self.assertEqual(config["hidden_size"] , 7_6_8 )
def lowerCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase_ : Dict = Path(a ) / "a.txt"
filename.touch()
self.assertEqual(get_file_from_repo(a , "a.txt" ) , str(a ) )
self.assertIsNone(get_file_from_repo(a , "b.txt" ) )
| 711
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json',
}
class GPTNeoXJapaneseConfig(PretrainedConfig):
    model_type = "gpt_neox_japanese"

    def __init__(
        self, vocab_size=32000, hidden_size=2560, num_hidden_layers=32, num_attention_heads=32,
        intermediate_multiple_size=4, hidden_act="gelu", rotary_pct=1.00, rotary_emb_base=10000,
        max_position_embeddings=2048, initializer_range=0.02, layer_norm_eps=1e-5, use_cache=True,
        bos_token_id=31996, eos_token_id=31999, attention_dropout=0.1, hidden_dropout=0.0, **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
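# Usage sketch (added for illustration; the overrides below are assumptions,
# not values from the original module):
if __name__ == "__main__":
    config = GPTNeoXJapaneseConfig(hidden_size=512, num_hidden_layers=4)
    print(config.hidden_size, config.rotary_emb_base)  # 512 10000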
'''simple docstring'''
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    """Return the sorted-letter signature shared by all anagrams of `word`."""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """Return every word in the word list that is an anagram of `my_word`."""
    return word_by_signature[signature(my_word)]


data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('anagrams.txt', 'w') as file:
file.write('all_anagrams = \n ')
file.write(pprint.pformat(all_anagrams))
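# Usage sketch (added; assumes the word "listen" appears in words.txt):
if __name__ == "__main__":
    print(signature("listen"))  # -> "eilnst"
    print(anagram("listen"))  # every word in words.txt with the same letters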
'''simple docstring'''
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    """Vertex of an undirected, weighted graph."""

    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex id: edge weight}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    # add the neighbors (vertices are addressed 1-based here):
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    """Prim's algorithm with a linear scan for the minimum key, O(V^2)."""
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    """Prim's algorithm driven by a binary heap, O(E log V)."""
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)
    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
        hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    """Placeholder for doctest coverage; run via testmod() below."""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
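# Usage sketch (added; the three-vertex triangle graph is an illustrative
# assumption, not part of the original module):
if __name__ == "__main__":
    vertices = [Vertex(i) for i in range(3)]
    connect(vertices, 1, 2, 1)
    connect(vertices, 2, 3, 2)
    connect(vertices, 1, 3, 4)
    # Both variants report MST edges as 1-based (vertex, parent) pairs.
    print(prim(vertices, vertices[0]))  # -> [(2, 1), (3, 2)]
    print(list(prim_heap(vertices, vertices[0])))  # same tree via the heap version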
def max_digit_removal(number: int) -> int:
    """Return the largest value obtainable by deleting exactly one digit.

    The function name is a descriptive reconstruction; the logic is original.
    """
    if not isinstance(number, int):
        raise TypeError("only integers accepted as input")
    num_string = str(abs(number))
    num_transpositions = [list(num_string) for _ in range(len(num_string))]
    for index in range(len(num_string)):
        num_transpositions[index].pop(index)
    return max(
        int("".join(list(transposition))) for transposition in num_transpositions
    )
if __name__ == "__main__":
__import__('doctest').testmod()
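# Usage sketch (added; 2736 is an illustrative input):
if __name__ == "__main__":
    # Dropping the digit 2 gives the largest remaining number.
    print(max_digit_removal(2736))  # -> 736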
'''simple docstring'''
def odd_even_sort(input_list: list) -> list:
    """Brick sort: alternate passes over even and odd indices until sorted."""
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
    return input_list


if __name__ == "__main__":
    print("Enter list to be sorted")
    input_list = [int(x) for x in input().split()]
    # inputting elements of the list in one line
    sorted_list = odd_even_sort(input_list)
    print("The sorted list is")
    print(sorted_list)
'''simple docstring'''
from pathlib import Path
import fire
from tqdm import tqdm
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase="ro" , _UpperCamelCase="en" , _UpperCamelCase="wmt16" , _UpperCamelCase=None ):
"""simple docstring"""
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError("run pip install datasets" )
lowercase_ : Dict = F"""{src_lang}-{tgt_lang}"""
print(F"""Converting {dataset}-{pair}""" )
lowercase_ : int = datasets.load_dataset(_UpperCamelCase , _UpperCamelCase )
if save_dir is None:
lowercase_ : Optional[int] = F"""{dataset}-{pair}"""
lowercase_ : Optional[Any] = Path(_UpperCamelCase )
save_dir.mkdir(exist_ok=_UpperCamelCase )
for split in ds.keys():
print(F"""Splitting {split} with {ds[split].num_rows} records""" )
# to save to val.source, val.target like summary datasets
lowercase_ : Any = "val" if split == "validation" else split
lowercase_ : Tuple = save_dir.joinpath(F"""{fn}.source""" )
lowercase_ : Dict = save_dir.joinpath(F"""{fn}.target""" )
lowercase_ : int = src_path.open("w+" )
lowercase_ : List[str] = tgt_path.open("w+" )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
lowercase_ : int = x["translation"]
src_fp.write(ex[src_lang] + "\n" )
tgt_fp.write(ex[tgt_lang] + "\n" )
print(F"""Saved {dataset} dataset to {save_dir}""" )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
'''simple docstring'''
def get_demo_graph(index: int) -> dict:
    """Return one of four hard-coded undirected demo graphs, selected by index."""
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges(graph: dict) -> list[tuple[int, int]]:
    """Return the bridges of an undirected graph, found via Tarjan-style DFS."""
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at, parent, bridges, id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges: list[tuple[int, int]] = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
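# Usage sketch (added): bridges of demo graph 0 — removing any of these edges
# disconnects the graph.
if __name__ == "__main__":
    print(compute_bridges(get_demo_graph(0)))  # -> [(3, 4), (2, 3), (2, 5)]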
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"


def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    """Schedule with a constant learning rate."""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    """Constant learning rate after a linear warmup over `num_warmup_steps`."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    """Piecewise-constant multiplier, e.g. "1:10,0.1:20,0.005" keeps 1 until
    step 10, 0.1 until step 20, then 0.005 afterwards."""
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, steps_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)


def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Linear warmup followed by a linear decay to zero."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    """Linear warmup followed by a cosine decay."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    """Cosine schedule with `num_cycles` hard restarts, after a linear warmup."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    """Polynomial decay from the optimizer's initial lr down to `lr_end`, after warmup."""
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)


TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}


def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    """Unified entry point that dispatches to the scheduler named by `name`."""
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps,
            num_cycles=num_cycles, last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps,
            power=power, last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
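# Usage sketch (added for illustration; the toy parameter and step counts are
# assumptions, not part of the original module):
if __name__ == "__main__":
    import torch

    param = torch.nn.Parameter(torch.zeros(2, 2))
    optimizer = torch.optim.AdamW([param], lr=1e-3)
    scheduler = get_scheduler("linear", optimizer, num_warmup_steps=10, num_training_steps=100)
    for _ in range(5):
        optimizer.step()
        scheduler.step()
    # Halfway through warmup the multiplier is 5/10, so the lr is 5e-4.
    print(optimizer.param_groups[0]["lr"])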
'''simple docstring'''
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
UpperCamelCase__ = 'scheduler_config.json'
class FlaxKarrasDiffusionSchedulers(Enum):
    # Member names reconstructed from diffusers' flax scheduler registry; the
    # integer values 1-5 are from the original.
    FlaxDDIMScheduler = 1
    FlaxDDPMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5


@dataclass
class FlaxSchedulerOutput(BaseOutput):
    prev_sample: jnp.ndarray


class FlaxSchedulerMixin:
    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True
    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Dict[str, Any] = None,
        subfolder: Optional[str] = None,
        return_unused_kwargs=False,
        **kwargs,
    ):
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            **kwargs,
        )
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)

        if hasattr(scheduler, "create_state") and getattr(scheduler, "has_state", False):
            state = scheduler.create_state()

        if return_unused_kwargs:
            return scheduler, state, unused_kwargs

        return scheduler, state

    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray:
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, dtype=jnp.float32):
    """Discretize a squared-cosine alpha-bar curve into a beta schedule."""

    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)
@flax.struct.dataclass
class CommonSchedulerState:
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype
                )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}"
            )

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)
        return cls(alphas=alphas, betas=betas, alphas_cumprod=alphas_cumprod)
def get_sqrt_alpha_prod(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod


def add_noise_common(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples


def get_velocity_common(state: CommonSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
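# Note (added): in DDPM notation, add_noise_common implements the forward
# diffusion step
#     x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps,
# and get_velocity_common the v-prediction target
#     v_t = sqrt(alpha_bar_t) * eps - sqrt(1 - alpha_bar_t) * x_0,
# where alpha_bar_t = prod_{i<=t} (1 - beta_i) is alphas_cumprod[t].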
'''simple docstring'''
def largest_square_area_in_matrix_top_down_approach(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Plain recursion over every cell; exponential without memoisation."""

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_approach_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Top-down recursion memoised with a dp table, O(rows * cols)."""

    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]


def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Iterative bottom-up dp over a (rows + 1) x (cols + 1) table."""
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]

            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0
    return largest_square_area


def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Bottom-up dp keeping only the current and next rows, O(cols) memory."""
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]

            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        next_row = current_row[:]  # copy, so later writes don't corrupt the lookups
    return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
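# Cross-check sketch (added): the other three implementations agree on the
# same illustrative 2x2 input.
if __name__ == "__main__":
    mat = [[1, 1], [1, 1]]
    print(largest_square_area_in_matrix_top_down_approach(2, 2, mat))  # 2
    print(largest_square_area_in_matrix_top_down_approach_with_dp(2, 2, mat))  # 2
    print(largest_square_area_in_matrix_bottom_up_space_optimization(2, 2, mat))  # 2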
'''simple docstring'''
import heapq
def greedy_min_vertex_cover(graph: dict) -> set:
    """Greedy approximation of a minimum vertex cover."""
    queue: list[list] = []

    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v haven't adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
'''simple docstring'''
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    """Return both roots of a*x^2 + b*x + c = 0, as reals when they are real."""
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c

    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)

    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main():
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution_1} and {solution_2}")
if __name__ == "__main__":
main()
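# Usage sketch (added): a negative discriminant yields a complex pair.
if __name__ == "__main__":
    print(quadratic_roots(a=1, b=0, c=4))  # -> (2j, -2j)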
'''simple docstring'''
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
'pipelines_utils',
'0.22.0',
'Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.',
standard_warn=False,
stacklevel=3,
)
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import Speech2TextConfig, Speech2TextForConditionalGeneration


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_keys(s_dict):
    # Replacement targets follow the upstream conversion script: fairseq
    # "transformer_layers" modules become "layers" and the "subsample"
    # feature extractor becomes "conv".
    keys = list(s_dict.keys())
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("transformer_layers", "layers")] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_s2t_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    mam_aaa = torch.load(checkpoint_path, map_location="cpu")
    args = mam_aaa["args"]
    state_dict = mam_aaa["model"]
    lm_head_weights = state_dict["decoder.output_projection.weight"]

    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)

    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]
    tie_embeds = args.share_decoder_input_output_embed
    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(",")]

    config = Speech2TextConfig(
        vocab_size=vocab_size,
        max_source_positions=args.max_source_positions,
        max_target_positions=args.max_target_positions,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
        num_conv_layers=len(conv_kernel_sizes),
        conv_channels=args.conv_channels,
        conv_kernel_sizes=conv_kernel_sizes,
        input_feat_per_channel=args.input_feat_per_channel,
        input_channels=args.input_channels,
        tie_word_embeddings=tie_embeds,
        num_beams=5,
        max_length=200,
        use_cache=True,
        decoder_start_token_id=2,
        early_stopping=True,
    )

    model = Speech2TextForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--fairseq_path', type=str, help='Path to the fairseq model (.pt) file.')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
UpperCamelCase__ = parser.parse_args()
convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
'''simple docstring'''
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
UpperCamelCase__ = {
'cola': 2,
'mnli': 3,
'mrpc': 2,
'sst-2': 2,
'sts-b': 1,
'qqp': 2,
'qnli': 2,
'rte': 2,
'wnli': 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, xlnet_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    config = XLNetConfig.from_json_file(xlnet_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--xlnet_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained XLNet model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--finetuning_task',
default=None,
type=str,
help='Name of a task on which the XLNet TensorFlow model was fine-tuned',
)
    args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json',
}
class MvpConfig(PretrainedConfig):
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self, vocab_size=50267, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096,
        encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0, activation_function="gelu", d_model=1024, dropout=0.1,
        attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, classifier_dropout=0.0,
        scale_embedding=False, use_cache=True, pad_token_id=1, bos_token_id=0, eos_token_id=2,
        is_encoder_decoder=True, decoder_start_token_id=2, forced_eos_token_id=2, use_prompt=False,
        prompt_length=100, prompt_mid_dim=800, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id, **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
'''simple docstring'''
def is_power_of_two(number: int) -> bool:
    """Return True if `number` is a power of two, via the n & (n - 1) bit trick."""
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
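# Usage sketch (added): note that 0 also passes the bit trick, since 0 & -1 == 0.
if __name__ == "__main__":
    print([n for n in range(1, 20) if is_power_of_two(n)])  # [1, 2, 4, 8, 16]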
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
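# A minimal concrete subcommand sketch (added; the "hello" command is an
# illustrative assumption, not part of the library). As in the library's own
# commands, `parser` is the subparsers action returned by add_subparsers():
class HelloCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        hello_parser = parser.add_parser("hello", help="print a greeting")
        hello_parser.set_defaults(func=lambda args: HelloCommand())

    def run(self):
        print("hello from a transformers-style CLI subcommand")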
'''simple docstring'''
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
EN_CODE = 50003
PYTHON_CODE = 50002
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( snake_case , unittest.TestCase ):
__lowerCamelCase: Optional[int] = PLBartTokenizer
__lowerCamelCase: Any = None
__lowerCamelCase: Dict = False
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
lowercase_ : Any = PLBartTokenizer(a , language_codes="base" , keep_accents=a )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ : List[str] = PLBartTokenizer(a , language_codes="base" , keep_accents=a )
lowercase_ : List[str] = tokenizer.tokenize("This is a test" )
self.assertListEqual(a , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
lowercase_ : List[str] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
lowercase_ : Union[str, Any] = tokenizer.convert_tokens_to_ids(a )
self.assertListEqual(
a , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
lowercase_ : Optional[int] = tokenizer.convert_ids_to_tokens(a )
self.assertListEqual(
a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
lowercase_ : Dict = tokenizer.vocab_size
lowercase_ : str = [tokenizer.convert_ids_to_tokens(a ) for x in range(end - 4 , a )]
self.assertListEqual(a , ["__java__", "__python__", "__en_XX__", "<mask>"] )
lowercase_ : int = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
lowercase_ : str = tokenizer(a ).input_ids
self.assertEqual(
tokenizer.decode(a , skip_special_tokens=a , clean_up_tokenization_spaces=a ) , a , )
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
lowercase_ : str = PLBartTokenizer(a , language_codes="multi" , keep_accents=a )
lowercase_ : List[str] = tokenizer.tokenize("This is a test" )
self.assertListEqual(a , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
lowercase_ : Optional[Any] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
lowercase_ : Optional[Any] = tokenizer.convert_tokens_to_ids(a )
self.assertListEqual(
a , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
lowercase_ : List[str] = tokenizer.convert_ids_to_tokens(a )
self.assertListEqual(
a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
lowercase_ : Dict = tokenizer.vocab_size
lowercase_ : Union[str, Any] = [tokenizer.convert_ids_to_tokens(a ) for x in range(end - 7 , a )]
self.assertListEqual(
a , ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"] )
lowercase_ : Any = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
lowercase_ : List[Any] = tokenizer(a ).input_ids
self.assertEqual(
tokenizer.decode(a , skip_special_tokens=a , clean_up_tokenization_spaces=a ) , a , )
@require_torch
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( unittest.TestCase ):
__lowerCamelCase: int = 'uclanlp/plbart-python-en_XX'
__lowerCamelCase: Tuple = [
'def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])',
'def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])',
]
__lowerCamelCase: List[str] = [
'Returns the maximum value of a b c.',
'Sums the values of a b c.',
]
__lowerCamelCase: List[str] = [
134,
5452,
3_3460,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
988,
20,
3_3456,
19,
3_3456,
771,
39,
4258,
889,
3318,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
2471,
2,
PYTHON_CODE,
]
@classmethod
def lowerCAmelCase__ ( cls : str ):
'''simple docstring'''
lowercase_ : PLBartTokenizer = PLBartTokenizer.from_pretrained(
cls.checkpoint_name , language_codes="base" , src_lang="python" , tgt_lang="en_XX" )
lowercase_ : List[str] = 1
return cls
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"] , 5_0_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"] , 5_0_0_0_2 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"] , 5_0_0_0_3 )
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
lowercase_ : int = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , a )
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
self.assertIn(a , self.tokenizer.all_special_ids )
lowercase_ : List[Any] = [EN_CODE, 9_0_3_7, 3_3_4_4_2, 5_7, 7_5_2, 1_5_3, 1_4, 5_6, 1_8, 9, 2]
lowercase_ : Optional[int] = self.tokenizer.decode(a , skip_special_tokens=a )
lowercase_ : Any = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=a )
self.assertEqual(a , a )
self.assertNotIn(self.tokenizer.eos_token , a )
def lowerCAmelCase__ ( self : Dict ):
'''simple docstring'''
lowercase_ : Optional[int] = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 2_0]
self.assertIsInstance(src_text[0] , a )
lowercase_ : Tuple = 1_0
lowercase_ : int = self.tokenizer(a , max_length=a , truncation=a ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , a )
self.assertEqual(len(a ) , a )
def lowerCAmelCase__ ( self : Dict ):
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"] ) , [5_0_0_0_4, 5_0_0_0_1] )
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
lowercase_ : Optional[int] = tempfile.mkdtemp()
lowercase_ : List[str] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(a )
lowercase_ : Tuple = PLBartTokenizer.from_pretrained(a )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , a )
@require_torch
def lowerCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ : int = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=a , return_tensors="pt" )
lowercase_ : List[str] = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , a )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
lowercase_ : Union[str, Any] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=a , truncation=a , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
lowercase_ : Dict = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
self.assertIsInstance(a , a )
self.assertEqual((2, 2_6) , batch.input_ids.shape )
self.assertEqual((2, 2_6) , batch.attention_mask.shape )
lowercase_ : Dict = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , a )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
lowercase_ : List[str] = self.tokenizer(self.src_text , padding=a , truncation=a , max_length=3 , return_tensors="pt" )
lowercase_ : List[str] = self.tokenizer(
text_target=self.tgt_text , padding=a , truncation=a , max_length=1_0 , return_tensors="pt" )
lowercase_ : Dict = targets["input_ids"]
lowercase_ : str = shift_tokens_right(a , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )
@require_torch
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
lowercase_ : List[str] = self.tokenizer._build_translation_inputs(
"A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="java" )
self.assertEqual(
nested_simplify(a ) , {
# A, test, EOS, en_XX
"input_ids": [[1_5_0, 2_4_2, 2, 5_0_0_0_3]],
"attention_mask": [[1, 1, 1, 1]],
# java
"forced_bos_token_id": 5_0_0_0_1,
} , )
'''simple docstring'''
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Doolittle LU decomposition of a square matrix (no pivoting)."""
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
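# Verification sketch (added; the sample matrix is an illustrative assumption):
if __name__ == "__main__":
    matrix = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
    lower, upper = lower_upper_decomposition(matrix)
    print(np.allclose(lower @ upper, matrix))  # True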
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
UpperCamelCase__ = {
'cola': 2,
'mnli': 3,
'mrpc': 2,
'sst-2': 2,
'sts-b': 1,
'qqp': 2,
'qnli': 2,
'rte': 2,
'wnli': 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, xlnet_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    config = XLNetConfig.from_json_file(xlnet_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--xlnet_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained XLNet model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--finetuning_task',
default=None,
type=str,
help='Name of a task on which the XLNet TensorFlow model was fine-tuned',
)
    args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: Tuple = 'linear'
__lowerCamelCase: Any = 'cosine'
__lowerCamelCase: Optional[Any] = 'cosine_with_restarts'
__lowerCamelCase: Tuple = 'polynomial'
__lowerCamelCase: int = 'constant'
__lowerCamelCase: Optional[Any] = 'constant_with_warmup'
__lowerCamelCase: List[str] = 'piecewise_constant'
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase = -1 ):
"""simple docstring"""
return LambdaLR(_UpperCamelCase , lambda _UpperCamelCase : 1 , last_epoch=_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = -1 ):
"""simple docstring"""
def lr_lambda(_UpperCamelCase ):
if current_step < num_warmup_steps:
return float(_UpperCamelCase ) / float(max(1.0 , _UpperCamelCase ) )
return 1.0
return LambdaLR(_UpperCamelCase , _UpperCamelCase , last_epoch=_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = -1 ):
"""simple docstring"""
lowercase_ : List[Any] = {}
lowercase_ : Dict = step_rules.split("," )
for rule_str in rule_list[:-1]:
lowercase_ , lowercase_ : Any = rule_str.split(":" )
lowercase_ : List[Any] = int(_UpperCamelCase )
lowercase_ : int = float(_UpperCamelCase )
lowercase_ : Optional[int] = value
lowercase_ : Union[str, Any] = float(rule_list[-1] )
def create_rules_function(_UpperCamelCase , _UpperCamelCase ):
def rule_func(_UpperCamelCase ) -> float:
lowercase_ : Optional[Any] = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(_UpperCamelCase ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
lowercase_ : Optional[int] = create_rules_function(_UpperCamelCase , _UpperCamelCase )
return LambdaLR(_UpperCamelCase , _UpperCamelCase , last_epoch=_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=-1 ):
"""simple docstring"""
def lr_lambda(_UpperCamelCase ):
if current_step < num_warmup_steps:
return float(_UpperCamelCase ) / float(max(1 , _UpperCamelCase ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 0.5 , _UpperCamelCase = -1 ):
"""simple docstring"""
def lr_lambda(_UpperCamelCase ):
if current_step < num_warmup_steps:
return float(_UpperCamelCase ) / float(max(1 , _UpperCamelCase ) )
lowercase_ : List[str] = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(_UpperCamelCase ) * 2.0 * progress )) )
return LambdaLR(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 1 , _UpperCamelCase = -1 ):
"""simple docstring"""
def lr_lambda(_UpperCamelCase ):
if current_step < num_warmup_steps:
return float(_UpperCamelCase ) / float(max(1 , _UpperCamelCase ) )
lowercase_ : Dict = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(_UpperCamelCase ) * progress) % 1.0) )) )
return LambdaLR(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=1e-7 , _UpperCamelCase=1.0 , _UpperCamelCase=-1 ):
"""simple docstring"""
lowercase_ : Dict = optimizer.defaults["lr"]
if not (lr_init > lr_end):
        raise ValueError(F"""lr_end ({lr_end}) must be smaller than initial lr ({lr_init})""" )
def lr_lambda(_UpperCamelCase ):
if current_step < num_warmup_steps:
return float(_UpperCamelCase ) / float(max(1 , _UpperCamelCase ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
lowercase_ : int = lr_init - lr_end
lowercase_ : Optional[int] = num_training_steps - num_warmup_steps
lowercase_ : Optional[Any] = 1 - (current_step - num_warmup_steps) / decay_steps
lowercase_ : List[Any] = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
UpperCamelCase__ = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = 1 , _UpperCamelCase = 1.0 , _UpperCamelCase = -1 , ):
"""simple docstring"""
lowercase_ : Any = SchedulerType(_UpperCamelCase )
lowercase_ : List[Any] = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(_UpperCamelCase , last_epoch=_UpperCamelCase )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(_UpperCamelCase , step_rules=_UpperCamelCase , last_epoch=_UpperCamelCase )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(F"""{name} requires `num_warmup_steps`, please provide that argument.""" )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(_UpperCamelCase , num_warmup_steps=_UpperCamelCase , last_epoch=_UpperCamelCase )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(F"""{name} requires `num_training_steps`, please provide that argument.""" )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
_UpperCamelCase , num_warmup_steps=_UpperCamelCase , num_training_steps=_UpperCamelCase , num_cycles=_UpperCamelCase , last_epoch=_UpperCamelCase , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
_UpperCamelCase , num_warmup_steps=_UpperCamelCase , num_training_steps=_UpperCamelCase , power=_UpperCamelCase , last_epoch=_UpperCamelCase , )
return schedule_func(
_UpperCamelCase , num_warmup_steps=_UpperCamelCase , num_training_steps=_UpperCamelCase , last_epoch=_UpperCamelCase )
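if __name__ == "__main__":
    # Standalone sketch of the warmup-multiplier pattern every factory above
    # implements. Because the factories in this snippet share one obfuscated
    # name, the sketch builds the equivalent LambdaLR directly; the 10-step
    # linear warmup mirrors get_constant_schedule_with_warmup.
    import torch

    _model = torch.nn.Linear(4, 2)
    _optimizer = torch.optim.SGD(_model.parameters(), lr=0.1)
    _warmup_steps = 10
    _scheduler = LambdaLR(_optimizer, lambda step: min(1.0, step / max(1, _warmup_steps)))
    for _ in range(5):
        _optimizer.step()
        _scheduler.step()
    print(_optimizer.param_groups[0]["lr"])  # 0.05: halfway through the warmup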
| 640
| 0
|
'''simple docstring'''
import math
UpperCamelCase__ = 10
UpperCamelCase__ = 7
UpperCamelCase__ = BALLS_PER_COLOUR * NUM_COLOURS
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase = 20 ):
"""simple docstring"""
    lowercase_ : Dict = math.comb(NUM_BALLS , _UpperCamelCase )
lowercase_ : Any = math.comb(NUM_BALLS - BALLS_PER_COLOUR , _UpperCamelCase )
lowercase_ : Tuple = NUM_COLOURS * (1 - missing_colour / total)
return F"""{result:.9f}"""
if __name__ == "__main__":
print(solution(20))
| 701
|
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase = 200 ):
"""simple docstring"""
lowercase_ : Optional[int] = [1, 2, 5, 10, 20, 50, 100, 200]
lowercase_ : str = [0] * (pence + 1)
lowercase_ : Dict = 1 # base case: 1 way to make 0 pence
for coin in coins:
for i in range(_UpperCamelCase , pence + 1 , 1 ):
number_of_ways[i] += number_of_ways[i - coin]
return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 73682
| 640
| 0
|
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
UpperCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCamelCase__ = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: Union[PIL.Image.Image, np.ndarray]
class _UpperCAmelCase ( snake_case ):
def __init__( self : Union[str, Any] , a : PriorTransformer , a : CLIPVisionModel , a : CLIPImageProcessor , a : HeunDiscreteScheduler , a : ShapERenderer , ):
'''simple docstring'''
super().__init__()
self.register_modules(
prior=a , image_encoder=a , image_processor=a , scheduler=a , renderer=a , )
def lowerCAmelCase__ ( self : List[str] , a : List[Any] , a : List[str] , a : str , a : str , a : Tuple , a : Tuple ):
'''simple docstring'''
if latents is None:
lowercase_ : Any = randn_tensor(a , generator=a , device=a , dtype=a )
else:
if latents.shape != shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
lowercase_ : Optional[int] = latents.to(a )
lowercase_ : Optional[int] = latents * scheduler.init_noise_sigma
return latents
def lowerCAmelCase__ ( self : Dict , a : Union[str, Any]=0 ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowercase_ : Optional[int] = torch.device(f"""cuda:{gpu_id}""" )
lowercase_ : Union[str, Any] = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(a , a )
@property
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
if self.device != torch.device("meta" ) or not hasattr(self.image_encoder , "_hf_hook" ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(a , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def lowerCAmelCase__ ( self : Optional[Any] , a : List[str] , a : List[Any] , a : Any , a : Optional[int] , ):
'''simple docstring'''
if isinstance(a , a ) and isinstance(image[0] , torch.Tensor ):
lowercase_ : List[str] = torch.cat(a , axis=0 ) if image[0].ndim == 4 else torch.stack(a , axis=0 )
if not isinstance(a , torch.Tensor ):
lowercase_ : Any = self.image_processor(a , return_tensors="pt" ).pixel_values[0].unsqueeze(0 )
lowercase_ : Optional[Any] = image.to(dtype=self.image_encoder.dtype , device=a )
lowercase_ : int = self.image_encoder(a )["last_hidden_state"]
lowercase_ : Tuple = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
lowercase_ : Dict = image_embeds.repeat_interleave(a , dim=0 )
if do_classifier_free_guidance:
lowercase_ : Union[str, Any] = torch.zeros_like(a )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowercase_ : List[str] = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(a )
def __call__( self : Tuple , a : Union[PIL.Image.Image, List[PIL.Image.Image]] , a : int = 1 , a : int = 2_5 , a : Optional[Union[torch.Generator, List[torch.Generator]]] = None , a : Optional[torch.FloatTensor] = None , a : float = 4.0 , a : int = 6_4 , a : Optional[str] = "pil" , a : bool = True , ):
'''simple docstring'''
if isinstance(a , PIL.Image.Image ):
lowercase_ : Optional[Any] = 1
elif isinstance(a , torch.Tensor ):
lowercase_ : str = image.shape[0]
elif isinstance(a , a ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
lowercase_ : Tuple = len(a )
else:
raise ValueError(
f"""`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(a )}""" )
lowercase_ : Union[str, Any] = self._execution_device
lowercase_ : Dict = batch_size * num_images_per_prompt
lowercase_ : Dict = guidance_scale > 1.0
lowercase_ : Optional[Any] = self._encode_image(a , a , a , a )
# prior
self.scheduler.set_timesteps(a , device=a )
lowercase_ : Any = self.scheduler.timesteps
lowercase_ : List[str] = self.prior.config.num_embeddings
lowercase_ : Optional[int] = self.prior.config.embedding_dim
lowercase_ : str = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , a , a , a , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
lowercase_ : Dict = latents.reshape(latents.shape[0] , a , a )
for i, t in enumerate(self.progress_bar(a ) ):
# expand the latents if we are doing classifier free guidance
lowercase_ : Dict = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase_ : Any = self.scheduler.scale_model_input(a , a )
lowercase_ : List[Any] = self.prior(
a , timestep=a , proj_embedding=a , ).predicted_image_embedding
# remove the variance
lowercase_ : Optional[Any] = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
lowercase_ : Dict = noise_pred.chunk(2 )
lowercase_ : List[str] = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
lowercase_ : List[Any] = self.scheduler.step(
a , timestep=a , sample=a , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=a )
lowercase_ : Any = []
for i, latent in enumerate(a ):
lowercase_ : Optional[int] = self.renderer.decode(
latent[None, :] , a , size=a , ray_batch_size=4_0_9_6 , n_coarse_samples=6_4 , n_fine_samples=1_2_8 , )
images.append(a )
lowercase_ : int = torch.stack(a )
if output_type not in ["np", "pil"]:
raise ValueError(f"""Only the output types `pil` and `np` are supported not output_type={output_type}""" )
lowercase_ : str = images.cpu().numpy()
if output_type == "pil":
lowercase_ : Optional[int] = [self.numpy_to_pil(a ) for image in images]
# Offload last model to CPU
if hasattr(self , "final_offload_hook" ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=a )
| 702
|
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _UpperCAmelCase ( snake_case ):
def __init__( self : Tuple , a : NestedDataStructureLike[PathLike] , a : Optional[NamedSplit] = None , a : Optional[Features] = None , a : str = None , a : bool = False , a : bool = False , a : Optional[str] = None , a : Optional[int] = None , **a : List[Any] , ):
'''simple docstring'''
super().__init__(
a , split=a , features=a , cache_dir=a , keep_in_memory=a , streaming=a , num_proc=a , **a , )
lowercase_ : str = field
lowercase_ : Optional[Any] = path_or_paths if isinstance(a , a ) else {self.split: path_or_paths}
lowercase_ : Any = Json(
cache_dir=a , data_files=a , features=a , field=a , **a , )
def lowerCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
if self.streaming:
lowercase_ : Any = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
lowercase_ : Dict = None
lowercase_ : Optional[int] = None
lowercase_ : str = None
lowercase_ : str = None
self.builder.download_and_prepare(
download_config=a , download_mode=a , verification_mode=a , base_path=a , num_proc=self.num_proc , )
lowercase_ : int = self.builder.as_dataset(
split=self.split , verification_mode=a , in_memory=self.keep_in_memory )
return dataset
class _UpperCAmelCase :
def __init__( self : str , a : Dataset , a : Union[PathLike, BinaryIO] , a : Optional[int] = None , a : Optional[int] = None , **a : List[Any] , ):
'''simple docstring'''
if num_proc is not None and num_proc <= 0:
raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""" )
lowercase_ : Dict = dataset
lowercase_ : Optional[int] = path_or_buf
lowercase_ : List[Any] = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
lowercase_ : Optional[Any] = num_proc
lowercase_ : List[Any] = "utf-8"
lowercase_ : List[str] = to_json_kwargs
def lowerCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ : str = self.to_json_kwargs.pop("path_or_buf" , a )
lowercase_ : Any = self.to_json_kwargs.pop("orient" , "records" )
lowercase_ : Any = self.to_json_kwargs.pop("lines" , True if orient == "records" else False )
lowercase_ : List[str] = self.to_json_kwargs.pop("index" , False if orient in ["split", "table"] else True )
lowercase_ : int = self.to_json_kwargs.pop("compression" , a )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(f"""`datasets` currently does not support {compression} compression""" )
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf , "wb" , compression=a ) as buffer:
lowercase_ : Dict = self._write(file_obj=a , orient=a , lines=a , index=a , **self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
f"""The compression parameter is not supported when writing to a buffer, but compression={compression}"""
" was passed. Please provide a local path instead." )
lowercase_ : Dict = self._write(
file_obj=self.path_or_buf , orient=a , lines=a , index=a , **self.to_json_kwargs )
return written
def lowerCAmelCase__ ( self : Optional[int] , a : List[str] ):
'''simple docstring'''
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ : List[Any] = args
lowercase_ : Optional[int] = query_table(
table=self.dataset.data , key=slice(a , offset + self.batch_size ) , indices=self.dataset._indices , )
lowercase_ : Dict = batch.to_pandas().to_json(
path_or_buf=a , orient=a , lines=a , index=a , **a )
if not json_str.endswith("\n" ):
json_str += "\n"
return json_str.encode(self.encoding )
def lowerCAmelCase__ ( self : int , a : BinaryIO , a : int , a : str , a : Union[str, Any] , **a : str , ):
'''simple docstring'''
lowercase_ : Union[str, Any] = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating json from Arrow format" , ):
lowercase_ : Dict = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(a )
else:
lowercase_ , lowercase_ : Any = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , a , a )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating json from Arrow format" , ):
written += file_obj.write(a )
return written
| 640
| 0
|
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Tuple = [0 for i in range(len(_UpperCamelCase ) )]
# initialize interval's left pointer and right pointer
    lowercase_ , lowercase_ : str = 0, 0
for i in range(1 , len(_UpperCamelCase ) ):
# case when current index is inside the interval
if i <= right_pointer:
lowercase_ : int = min(right_pointer - i + 1 , z_result[i - left_pointer] )
lowercase_ : str = min_edge
while go_next(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
z_result[i] += 1
# if new index's result gives us more right interval,
# we've to update left_pointer and right_pointer
if i + z_result[i] - 1 > right_pointer:
            lowercase_ , lowercase_ : Optional[Any] = i, i + z_result[i] - 1
return z_result
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
return i + z_result[i] < len(_UpperCamelCase ) and s[z_result[i]] == s[i + z_result[i]]
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : str = 0
# concatenate 'pattern' and 'input_str' and call z_function
# with concatenated string
lowercase_ : Tuple = z_function(pattern + input_str )
for val in z_result:
        # if the value is at least the length of the pattern string,
        # this index is the starting position of a substring
        # equal to the pattern string
if val >= len(_UpperCamelCase ):
answer += 1
return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
| 703
|
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
def update_area_of_max_square(_UpperCamelCase , _UpperCamelCase ) -> int:
# BASE CASE
if row >= rows or col >= cols:
return 0
lowercase_ : List[str] = update_area_of_max_square(_UpperCamelCase , col + 1 )
lowercase_ : List[Any] = update_area_of_max_square(row + 1 , col + 1 )
lowercase_ : Tuple = update_area_of_max_square(row + 1 , _UpperCamelCase )
if mat[row][col]:
lowercase_ : Optional[int] = 1 + min([right, diagonal, down] )
lowercase_ : Any = max(largest_square_area[0] , _UpperCamelCase )
return sub_problem_sol
else:
return 0
lowercase_ : int = [0]
update_area_of_max_square(0 , 0 )
return largest_square_area[0]
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
def update_area_of_max_square_using_dp_array(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> int:
if row >= rows or col >= cols:
return 0
if dp_array[row][col] != -1:
return dp_array[row][col]
lowercase_ : Dict = update_area_of_max_square_using_dp_array(_UpperCamelCase , col + 1 , _UpperCamelCase )
lowercase_ : str = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , _UpperCamelCase )
lowercase_ : Optional[Any] = update_area_of_max_square_using_dp_array(row + 1 , _UpperCamelCase , _UpperCamelCase )
if mat[row][col]:
lowercase_ : Tuple = 1 + min([right, diagonal, down] )
lowercase_ : int = max(largest_square_area[0] , _UpperCamelCase )
lowercase_ : Dict = sub_problem_sol
return sub_problem_sol
else:
return 0
lowercase_ : Any = [0]
lowercase_ : Optional[int] = [[-1] * cols for _ in range(_UpperCamelCase )]
update_area_of_max_square_using_dp_array(0 , 0 , _UpperCamelCase )
return largest_square_area[0]
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : str = [[0] * (cols + 1) for _ in range(rows + 1 )]
lowercase_ : List[Any] = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
lowercase_ : Tuple = dp_array[row][col + 1]
lowercase_ : List[str] = dp_array[row + 1][col + 1]
lowercase_ : List[Any] = dp_array[row + 1][col]
if mat[row][col] == 1:
lowercase_ : Any = 1 + min(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
lowercase_ : Any = max(dp_array[row][col] , _UpperCamelCase )
else:
lowercase_ : int = 0
return largest_square_area
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Optional[int] = [0] * (cols + 1)
lowercase_ : Union[str, Any] = [0] * (cols + 1)
lowercase_ : int = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
lowercase_ : Dict = current_row[col + 1]
lowercase_ : List[Any] = next_row[col + 1]
lowercase_ : Tuple = next_row[col]
if mat[row][col] == 1:
lowercase_ : Dict = 1 + min(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
lowercase_ : int = max(current_row[col] , _UpperCamelCase )
else:
lowercase_ : Tuple = 0
lowercase_ : Optional[Any] = current_row
return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
| 640
| 0
|
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return x + 2
class _UpperCAmelCase ( unittest.TestCase ):
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
lowercase_ : str = "x = 3"
lowercase_ : Union[str, Any] = {}
lowercase_ : Dict = evaluate(a , {} , state=a )
assert result == 3
self.assertDictEqual(a , {"x": 3} )
lowercase_ : Optional[int] = "x = y"
lowercase_ : Optional[Any] = {"y": 5}
lowercase_ : Tuple = evaluate(a , {} , state=a )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(a , {"x": 5, "y": 5} )
def lowerCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
lowercase_ : List[Any] = "y = add_two(x)"
lowercase_ : Optional[Any] = {"x": 3}
lowercase_ : Union[str, Any] = evaluate(a , {"add_two": add_two} , state=a )
assert result == 5
self.assertDictEqual(a , {"x": 3, "y": 5} )
# Won't work without the tool
with CaptureStdout() as out:
lowercase_ : List[Any] = evaluate(a , {} , state=a )
assert result is None
assert "tried to execute add_two" in out.out
def lowerCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
lowercase_ : Dict = "x = 3"
lowercase_ : int = {}
lowercase_ : List[str] = evaluate(a , {} , state=a )
assert result == 3
self.assertDictEqual(a , {"x": 3} )
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
lowercase_ : Any = "test_dict = {'x': x, 'y': add_two(x)}"
lowercase_ : List[Any] = {"x": 3}
lowercase_ : Optional[int] = evaluate(a , {"add_two": add_two} , state=a )
self.assertDictEqual(a , {"x": 3, "y": 5} )
self.assertDictEqual(a , {"x": 3, "test_dict": {"x": 3, "y": 5}} )
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ : str = "x = 3\ny = 5"
lowercase_ : Any = {}
lowercase_ : Optional[Any] = evaluate(a , {} , state=a )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(a , {"x": 3, "y": 5} )
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ : Optional[Any] = "text = f'This is x: {x}.'"
lowercase_ : List[Any] = {"x": 3}
lowercase_ : str = evaluate(a , {} , state=a )
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(a , {"x": 3, "text": "This is x: 3."} )
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ : Tuple = "if x <= 3:\n y = 2\nelse:\n y = 5"
lowercase_ : List[Any] = {"x": 3}
lowercase_ : Tuple = evaluate(a , {} , state=a )
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(a , {"x": 3, "y": 2} )
lowercase_ : List[str] = {"x": 8}
lowercase_ : List[Any] = evaluate(a , {} , state=a )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(a , {"x": 8, "y": 5} )
def lowerCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ : List[Any] = "test_list = [x, add_two(x)]"
lowercase_ : int = {"x": 3}
lowercase_ : Dict = evaluate(a , {"add_two": add_two} , state=a )
self.assertListEqual(a , [3, 5] )
self.assertDictEqual(a , {"x": 3, "test_list": [3, 5]} )
def lowerCAmelCase__ ( self : Dict ):
'''simple docstring'''
lowercase_ : List[Any] = "y = x"
lowercase_ : Dict = {"x": 3}
lowercase_ : List[Any] = evaluate(a , {} , state=a )
assert result == 3
self.assertDictEqual(a , {"x": 3, "y": 3} )
def lowerCAmelCase__ ( self : Dict ):
'''simple docstring'''
lowercase_ : Dict = "test_list = [x, add_two(x)]\ntest_list[1]"
lowercase_ : Union[str, Any] = {"x": 3}
lowercase_ : Union[str, Any] = evaluate(a , {"add_two": add_two} , state=a )
assert result == 5
self.assertDictEqual(a , {"x": 3, "test_list": [3, 5]} )
lowercase_ : str = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
lowercase_ : Dict = {"x": 3}
lowercase_ : Tuple = evaluate(a , {"add_two": add_two} , state=a )
assert result == 5
self.assertDictEqual(a , {"x": 3, "test_dict": {"x": 3, "y": 5}} )
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
lowercase_ : List[Any] = "x = 0\nfor i in range(3):\n x = i"
lowercase_ : str = {}
lowercase_ : Optional[Any] = evaluate(a , {"range": range} , state=a )
assert result == 2
self.assertDictEqual(a , {"x": 2, "i": 2} )
| 704
|
'''simple docstring'''
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
UpperCamelCase__ = namedtuple(
'_TestCommandArgs',
[
'dataset',
'name',
'cache_dir',
'data_dir',
'all_configs',
'save_infos',
'ignore_verifications',
'force_redownload',
'clear_cache',
],
defaults=[None, None, None, False, False, False, False, False],
)
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
return (abs(source - target ) / target) < 0.01
@pytest.mark.integration
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Optional[int] = _TestCommandArgs(dataset=_UpperCamelCase , all_configs=_UpperCamelCase , save_infos=_UpperCamelCase )
lowercase_ : int = TestCommand(*_UpperCamelCase )
test_command.run()
lowercase_ : List[str] = os.path.join(_UpperCamelCase , "README.md" )
assert os.path.exists(_UpperCamelCase )
lowercase_ : Any = DatasetInfosDict.from_directory(_UpperCamelCase )
lowercase_ : Optional[int] = DatasetInfosDict(
{
"default": DatasetInfo(
features=Features(
{
"tokens": Sequence(Value("string" ) ),
"ner_tags": Sequence(
ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] ) ),
"langs": Sequence(Value("string" ) ),
"spans": Sequence(Value("string" ) ),
} ) , splits=[
{
"name": "train",
"num_bytes": 235_1563,
"num_examples": 1_0000,
},
{
"name": "validation",
"num_bytes": 23_8418,
"num_examples": 1000,
},
] , download_size=394_0680 , dataset_size=258_9981 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
lowercase_ , lowercase_ : Optional[int] = getattr(dataset_infos["default"] , _UpperCamelCase ), getattr(expected_dataset_infos["default"] , _UpperCamelCase )
if key == "num_bytes":
assert is_apercent_close(_UpperCamelCase , _UpperCamelCase )
elif key == "splits":
assert list(_UpperCamelCase ) == list(_UpperCamelCase )
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes )
else:
            assert result == expected
| 640
| 0
|
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
if index == number_of_items:
return 0
lowercase_ : Tuple = 0
lowercase_ : Dict = 0
lowercase_ : int = knapsack(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , index + 1 )
if weights[index] <= max_weight:
lowercase_ : Tuple = values[index] + knapsack(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , max_weight - weights[index] , index + 1 )
return max(_UpperCamelCase , _UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 705
|
'''simple docstring'''
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
UpperCamelCase__ = ['text', 'image', 'audio']
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : List[Any] = []
for input_type in input_types:
if input_type == "text":
inputs.append("Text input" )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png" ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3000 ) )
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
inputs.append(create_inputs(_UpperCamelCase ) )
else:
raise ValueError(F"""Invalid type requested: {input_type}""" )
return inputs
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Optional[int] = []
for output in outputs:
if isinstance(_UpperCamelCase , (str, AgentText) ):
output_types.append("text" )
elif isinstance(_UpperCamelCase , (Image.Image, AgentImage) ):
output_types.append("image" )
elif isinstance(_UpperCamelCase , (torch.Tensor, AgentAudio) ):
output_types.append("audio" )
else:
raise ValueError(F"""Invalid output: {output}""" )
return output_types
@is_tool_test
class _UpperCAmelCase :
def lowerCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
self.assertTrue(hasattr(self.tool , "inputs" ) )
self.assertTrue(hasattr(self.tool , "outputs" ) )
lowercase_ : Optional[Any] = self.tool.inputs
for _input in inputs:
if isinstance(_input , a ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
lowercase_ : Any = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ : List[str] = create_inputs(self.tool.inputs )
lowercase_ : List[str] = self.tool(*a )
# There is a single output
if len(self.tool.outputs ) == 1:
lowercase_ : Union[str, Any] = [outputs]
self.assertListEqual(output_types(a ) , self.tool.outputs )
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
self.assertTrue(hasattr(self.tool , "description" ) )
self.assertTrue(hasattr(self.tool , "default_checkpoint" ) )
self.assertTrue(self.tool.description.startswith("This is a tool that" ) )
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ : Any = create_inputs(self.tool.inputs )
lowercase_ : str = self.tool(*a )
if not isinstance(a , a ):
lowercase_ : List[Any] = [outputs]
self.assertEqual(len(a ) , len(self.tool.outputs ) )
for output, output_type in zip(a , self.tool.outputs ):
lowercase_ : int = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(a , a ) )
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
lowercase_ : Dict = create_inputs(self.tool.inputs )
lowercase_ : Optional[int] = []
for _input, input_type in zip(a , self.tool.inputs ):
if isinstance(a , a ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
lowercase_ : Any = self.tool(*a )
if not isinstance(a , a ):
lowercase_ : Any = [outputs]
self.assertEqual(len(a ) , len(self.tool.outputs ) )
| 640
| 0
|
'''simple docstring'''
from collections.abc import Sequence
from queue import Queue
class _UpperCAmelCase :
def __init__( self : List[Any] , a : List[Any] , a : List[Any] , a : int , a : Optional[Any]=None , a : Optional[int]=None ):
'''simple docstring'''
lowercase_ : Dict = start
lowercase_ : Tuple = end
lowercase_ : Any = val
lowercase_ : Optional[int] = (start + end) // 2
lowercase_ : Any = left
lowercase_ : List[str] = right
def __repr__( self : Union[str, Any] ):
'''simple docstring'''
return f"""SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"""
class _UpperCAmelCase :
def __init__( self : Union[str, Any] , a : Sequence , a : Optional[int] ):
'''simple docstring'''
lowercase_ : List[str] = collection
lowercase_ : Union[str, Any] = function
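        # The combining `function` must be associative (e.g. operator.add, max,
        # min) so range queries can be composed from child segments.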
if self.collection:
lowercase_ : Union[str, Any] = self._build_tree(0 , len(a ) - 1 )
def lowerCAmelCase__ ( self : str , a : Optional[int] , a : List[Any] ):
'''simple docstring'''
self._update_tree(self.root , a , a )
def lowerCAmelCase__ ( self : List[Any] , a : Tuple , a : Dict ):
'''simple docstring'''
return self._query_range(self.root , a , a )
def lowerCAmelCase__ ( self : List[str] , a : List[str] , a : Tuple ):
'''simple docstring'''
if start == end:
return SegmentTreeNode(a , a , self.collection[start] )
lowercase_ : Union[str, Any] = (start + end) // 2
lowercase_ : Dict = self._build_tree(a , a )
lowercase_ : Union[str, Any] = self._build_tree(mid + 1 , a )
return SegmentTreeNode(a , a , self.fn(left.val , right.val ) , a , a )
def lowerCAmelCase__ ( self : Union[str, Any] , a : Dict , a : Tuple , a : List[str] ):
'''simple docstring'''
if node.start == i and node.end == i:
lowercase_ : Union[str, Any] = val
return
if i <= node.mid:
self._update_tree(node.left , a , a )
else:
self._update_tree(node.right , a , a )
lowercase_ : int = self.fn(node.left.val , node.right.val )
def lowerCAmelCase__ ( self : Optional[Any] , a : Union[str, Any] , a : Any , a : List[Any] ):
'''simple docstring'''
if node.start == i and node.end == j:
return node.val
if i <= node.mid:
if j <= node.mid:
# range in left child tree
return self._query_range(node.left , a , a )
else:
# range in left child tree and right child tree
return self.fn(
self._query_range(node.left , a , node.mid ) , self._query_range(node.right , node.mid + 1 , a ) , )
else:
# range in right child tree
return self._query_range(node.right , a , a )
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
if self.root is not None:
lowercase_ : List[str] = Queue()
queue.put(self.root )
while not queue.empty():
lowercase_ : List[Any] = queue.get()
yield node
if node.left is not None:
queue.put(node.left )
if node.right is not None:
queue.put(node.right )
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('*' * 50)
UpperCamelCase__ = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
| 706
|
'''simple docstring'''
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
UpperCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCamelCase__ = '\n Examples:\n ```py\n >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> negative_image_emb = out.negative_image_embeds\n\n >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")\n >>> pipe.to("cuda")\n\n >>> image = pipe(\n ... prompt,\n ... image_embeds=image_emb,\n ... negative_image_embeds=negative_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... ).images\n\n >>> image[0].save("cat.png")\n ```\n'
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=8 ):
"""simple docstring"""
lowercase_ : int = h // scale_factor**2
if h % scale_factor**2 != 0:
new_h += 1
lowercase_ : str = w // scale_factor**2
if w % scale_factor**2 != 0:
new_w += 1
return new_h * scale_factor, new_w * scale_factor
class _UpperCAmelCase ( snake_case ):
def __init__( self : int , a : MultilingualCLIP , a : XLMRobertaTokenizer , a : UNetaDConditionModel , a : Union[DDIMScheduler, DDPMScheduler] , a : VQModel , ):
'''simple docstring'''
super().__init__()
self.register_modules(
text_encoder=a , tokenizer=a , unet=a , scheduler=a , movq=a , )
lowercase_ : Dict = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowerCAmelCase__ ( self : List[Any] , a : Tuple , a : List[str] , a : Optional[Any] , a : str , a : Tuple , a : List[str] ):
'''simple docstring'''
if latents is None:
lowercase_ : List[str] = randn_tensor(a , generator=a , device=a , dtype=a )
else:
if latents.shape != shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
lowercase_ : Optional[int] = latents.to(a )
lowercase_ : str = latents * scheduler.init_noise_sigma
return latents
def lowerCAmelCase__ ( self : Optional[Any] , a : List[str] , a : List[Any] , a : Union[str, Any] , a : str , a : Tuple=None , ):
'''simple docstring'''
lowercase_ : Tuple = len(a ) if isinstance(a , a ) else 1
# get prompt text embeddings
lowercase_ : Any = self.tokenizer(
a , padding="max_length" , truncation=a , max_length=7_7 , return_attention_mask=a , add_special_tokens=a , return_tensors="pt" , )
lowercase_ : Union[str, Any] = text_inputs.input_ids
lowercase_ : Tuple = self.tokenizer(a , padding="longest" , return_tensors="pt" ).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(a , a ):
lowercase_ : Optional[int] = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
lowercase_ : List[str] = text_input_ids.to(a )
lowercase_ : int = text_inputs.attention_mask.to(a )
lowercase_ , lowercase_ : Optional[int] = self.text_encoder(
input_ids=a , attention_mask=a )
lowercase_ : str = prompt_embeds.repeat_interleave(a , dim=0 )
lowercase_ : int = text_encoder_hidden_states.repeat_interleave(a , dim=0 )
lowercase_ : int = text_mask.repeat_interleave(a , dim=0 )
if do_classifier_free_guidance:
lowercase_ : List[str]
if negative_prompt is None:
lowercase_ : int = [""] * batch_size
elif type(a ) is not type(a ):
raise TypeError(
f"""`negative_prompt` should be the same type to `prompt`, but got {type(a )} !="""
f""" {type(a )}.""" )
elif isinstance(a , a ):
lowercase_ : Tuple = [negative_prompt]
elif batch_size != len(a ):
raise ValueError(
f"""`negative_prompt`: {negative_prompt} has batch size {len(a )}, but `prompt`:"""
f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
" the batch size of `prompt`." )
else:
lowercase_ : Dict = negative_prompt
lowercase_ : str = self.tokenizer(
a , padding="max_length" , max_length=7_7 , truncation=a , return_attention_mask=a , add_special_tokens=a , return_tensors="pt" , )
lowercase_ : List[Any] = uncond_input.input_ids.to(a )
lowercase_ : Optional[int] = uncond_input.attention_mask.to(a )
lowercase_ , lowercase_ : int = self.text_encoder(
input_ids=a , attention_mask=a )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowercase_ : List[str] = negative_prompt_embeds.shape[1]
lowercase_ : Dict = negative_prompt_embeds.repeat(1 , a )
lowercase_ : Optional[Any] = negative_prompt_embeds.view(batch_size * num_images_per_prompt , a )
lowercase_ : Any = uncond_text_encoder_hidden_states.shape[1]
lowercase_ : List[Any] = uncond_text_encoder_hidden_states.repeat(1 , a , 1 )
lowercase_ : Tuple = uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt , a , -1 )
lowercase_ : List[Any] = uncond_text_mask.repeat_interleave(a , dim=0 )
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowercase_ : Optional[int] = torch.cat([negative_prompt_embeds, prompt_embeds] )
lowercase_ : Tuple = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
lowercase_ : Any = torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
def lowerCAmelCase__ ( self : Tuple , a : Optional[Any]=0 ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowercase_ : List[str] = torch.device(f"""cuda:{gpu_id}""" )
lowercase_ : str = [
self.unet,
self.text_encoder,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(a , a )
def lowerCAmelCase__ ( self : Union[str, Any] , a : List[str]=0 ):
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
lowercase_ : List[str] = torch.device(f"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=a )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowercase_ : List[str] = None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
lowercase_ , lowercase_ : Optional[int] = cpu_offload_with_hook(a , a , prev_module_hook=a )
if self.safety_checker is not None:
lowercase_ , lowercase_ : Optional[int] = cpu_offload_with_hook(self.safety_checker , a , prev_module_hook=a )
# We'll offload the last model manually.
lowercase_ : Dict = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(a , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(a )
def __call__( self : Tuple , a : Union[str, List[str]] , a : Union[torch.FloatTensor, List[torch.FloatTensor]] , a : Union[torch.FloatTensor, List[torch.FloatTensor]] , a : Optional[Union[str, List[str]]] = None , a : int = 5_1_2 , a : int = 5_1_2 , a : int = 1_0_0 , a : float = 4.0 , a : int = 1 , a : Optional[Union[torch.Generator, List[torch.Generator]]] = None , a : Optional[torch.FloatTensor] = None , a : Optional[str] = "pil" , a : bool = True , ):
'''simple docstring'''
if isinstance(a , a ):
lowercase_ : List[str] = 1
elif isinstance(a , a ):
lowercase_ : int = len(a )
else:
raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(a )}""" )
lowercase_ : Tuple = self._execution_device
lowercase_ : Dict = batch_size * num_images_per_prompt
lowercase_ : Dict = guidance_scale > 1.0
lowercase_ , lowercase_ , lowercase_ : List[str] = self._encode_prompt(
a , a , a , a , a )
if isinstance(a , a ):
lowercase_ : Optional[int] = torch.cat(a , dim=0 )
if isinstance(a , a ):
lowercase_ : int = torch.cat(a , dim=0 )
if do_classifier_free_guidance:
lowercase_ : Optional[int] = image_embeds.repeat_interleave(a , dim=0 )
lowercase_ : int = negative_image_embeds.repeat_interleave(a , dim=0 )
lowercase_ : str = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(
dtype=prompt_embeds.dtype , device=a )
self.scheduler.set_timesteps(a , device=a )
lowercase_ : List[str] = self.scheduler.timesteps
lowercase_ : str = self.unet.config.in_channels
lowercase_ , lowercase_ : int = get_new_h_w(a , a , self.movq_scale_factor )
# create initial latent
lowercase_ : str = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , a , a , a , self.scheduler , )
for i, t in enumerate(self.progress_bar(a ) ):
# expand the latents if we are doing classifier free guidance
lowercase_ : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase_ : Optional[int] = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
lowercase_ : Optional[int] = self.unet(
sample=a , timestep=a , encoder_hidden_states=a , added_cond_kwargs=a , return_dict=a , )[0]
if do_classifier_free_guidance:
lowercase_ , lowercase_ : Union[str, Any] = noise_pred.split(latents.shape[1] , dim=1 )
lowercase_ , lowercase_ : Optional[Any] = noise_pred.chunk(2 )
lowercase_ , lowercase_ : Any = variance_pred.chunk(2 )
lowercase_ : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowercase_ : int = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowercase_ , lowercase_ : str = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowercase_ : Tuple = self.scheduler.step(
a , a , a , generator=a , ).prev_sample
# post-processing
lowercase_ : Union[str, Any] = self.movq.decode(a , force_not_quantize=a )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
lowercase_ : List[Any] = image * 0.5 + 0.5
lowercase_ : Optional[int] = image.clamp(0 , 1 )
lowercase_ : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowercase_ : List[str] = self.numpy_to_pil(a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a )
| 640
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'xlm-mlm-en-2048': 'https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json',
'xlm-mlm-ende-1024': 'https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json',
'xlm-mlm-enfr-1024': 'https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json',
'xlm-mlm-enro-1024': 'https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json',
'xlm-mlm-tlm-xnli15-1024': 'https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json',
'xlm-mlm-xnli15-1024': 'https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json',
'xlm-clm-enfr-1024': 'https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json',
'xlm-clm-ende-1024': 'https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json',
'xlm-mlm-17-1280': 'https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json',
'xlm-mlm-100-1280': 'https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json',
}
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: List[str] = 'xlm'
__lowerCamelCase: Any = {
'hidden_size': 'emb_dim',
'num_attention_heads': 'n_heads',
'num_hidden_layers': 'n_layers',
'n_words': 'vocab_size', # For backward compatibility
}
def __init__( self : Any , a : str=3_0_1_4_5 , a : List[str]=2_0_4_8 , a : int=1_2 , a : int=1_6 , a : Optional[int]=0.1 , a : Optional[Any]=0.1 , a : List[Any]=True , a : Optional[Any]=False , a : Dict=False , a : Union[str, Any]=False , a : List[str]=1 , a : Any=True , a : List[Any]=5_1_2 , a : Union[str, Any]=2_0_4_8**-0.5 , a : str=1e-12 , a : str=0.02 , a : Optional[int]=0 , a : int=1 , a : Optional[int]=2 , a : int=3 , a : str=5 , a : List[Any]=True , a : Tuple="first" , a : List[Any]=True , a : str=None , a : int=True , a : Dict=0.1 , a : Any=5 , a : Tuple=5 , a : Dict=0 , a : List[str]=0 , a : Union[str, Any]=2 , a : str=0 , **a : Optional[Any] , ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : Optional[int] = vocab_size
lowercase_ : str = emb_dim
lowercase_ : Any = n_layers
lowercase_ : Tuple = n_heads
lowercase_ : Union[str, Any] = dropout
lowercase_ : Optional[Any] = attention_dropout
lowercase_ : Union[str, Any] = gelu_activation
lowercase_ : Dict = sinusoidal_embeddings
lowercase_ : List[Any] = causal
lowercase_ : Dict = asm
lowercase_ : List[str] = n_langs
lowercase_ : int = use_lang_emb
lowercase_ : Union[str, Any] = layer_norm_eps
lowercase_ : List[Any] = bos_index
lowercase_ : Any = eos_index
lowercase_ : int = pad_index
lowercase_ : Dict = unk_index
lowercase_ : List[str] = mask_index
lowercase_ : Union[str, Any] = is_encoder
lowercase_ : Any = max_position_embeddings
lowercase_ : int = embed_init_std
lowercase_ : List[str] = init_std
lowercase_ : int = summary_type
lowercase_ : List[Any] = summary_use_proj
lowercase_ : Optional[Any] = summary_activation
lowercase_ : Dict = summary_proj_to_labels
lowercase_ : Any = summary_first_dropout
lowercase_ : Union[str, Any] = start_n_top
lowercase_ : List[Any] = end_n_top
lowercase_ : Optional[int] = mask_token_id
lowercase_ : List[Any] = lang_id
if "n_words" in kwargs:
lowercase_ : str = kwargs["n_words"]
super().__init__(pad_token_id=a , bos_token_id=a , **a )
class _UpperCAmelCase ( snake_case ):
@property
def lowerCAmelCase__ ( self : Any ) -> List[Any]:
'''simple docstring'''
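        # Dynamic ONNX axes: batch and sequence length are variable, plus a
        # choice axis for multiple-choice tasks.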
if self.task == "multiple-choice":
lowercase_ : List[Any] = {0: "batch", 1: "choice", 2: "sequence"}
else:
lowercase_ : Optional[int] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
| 707
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
UpperCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCamelCase__ = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n'
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=8 ):
"""simple docstring"""
lowercase_ : Union[str, Any] = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
lowercase_ : Union[str, Any] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase=512 , _UpperCamelCase=512 ):
"""simple docstring"""
lowercase_ : Union[str, Any] = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
lowercase_ : str = np.array(pil_image.convert("RGB" ) )
lowercase_ : Optional[int] = arr.astype(np.floataa ) / 127.5 - 1
lowercase_ : int = np.transpose(_UpperCamelCase , [2, 0, 1] )
lowercase_ : str = torch.from_numpy(_UpperCamelCase ).unsqueeze(0 )
return image
class _UpperCAmelCase ( snake_case ):
def __init__( self : List[Any] , a : UNetaDConditionModel , a : DDPMScheduler , a : VQModel , ):
'''simple docstring'''
super().__init__()
self.register_modules(
unet=a , scheduler=a , movq=a , )
lowercase_ : Dict = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowerCAmelCase__ ( self : Union[str, Any] , a : Tuple , a : List[str] , a : List[Any] ):
'''simple docstring'''
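        # img2img: keep only the last `strength` fraction of the diffusion
        # schedule, so a lower strength preserves more of the input image.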
lowercase_ : Dict = min(int(num_inference_steps * strength ) , a )
lowercase_ : str = max(num_inference_steps - init_timestep , 0 )
lowercase_ : Tuple = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def lowerCAmelCase__ ( self : Union[str, Any] , a : int , a : List[Any] , a : Tuple , a : Union[str, Any] , a : int , a : Tuple , a : Optional[Any]=None ):
'''simple docstring'''
if not isinstance(a , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(a )}""" )
lowercase_ : str = image.to(device=a , dtype=a )
lowercase_ : Any = batch_size * num_images_per_prompt
if image.shape[1] == 4:
lowercase_ : str = image
else:
if isinstance(a , a ) and len(a ) != batch_size:
raise ValueError(
f"""You have passed a list of generators of length {len(a )}, but requested an effective batch"""
f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
elif isinstance(a , a ):
lowercase_ : str = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(a )
]
lowercase_ : List[Any] = torch.cat(a , dim=0 )
else:
lowercase_ : Tuple = self.movq.encode(a ).latent_dist.sample(a )
lowercase_ : Union[str, Any] = self.movq.config.scaling_factor * init_latents
lowercase_ : Tuple = torch.cat([init_latents] , dim=0 )
lowercase_ : List[Any] = init_latents.shape
lowercase_ : Union[str, Any] = randn_tensor(a , generator=a , device=a , dtype=a )
# get latents
lowercase_ : Dict = self.scheduler.add_noise(a , a , a )
lowercase_ : Tuple = init_latents
return latents
def lowerCAmelCase__ ( self : List[Any] , a : str=0 ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowercase_ : Optional[Any] = torch.device(f"""cuda:{gpu_id}""" )
lowercase_ : Optional[Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(a , a )
def lowerCAmelCase__ ( self : Optional[int] , a : Union[str, Any]=0 ):
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
lowercase_ : Any = torch.device(f"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=a )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowercase_ : Optional[int] = None
for cpu_offloaded_model in [self.unet, self.movq]:
lowercase_ , lowercase_ : Union[str, Any] = cpu_offload_with_hook(a , a , prev_module_hook=a )
# We'll offload the last model manually.
lowercase_ : List[Any] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(a , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(a )
def __call__( self : Optional[int] , a : Union[torch.FloatTensor, List[torch.FloatTensor]] , a : Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] , a : Union[torch.FloatTensor, List[torch.FloatTensor]] , a : int = 5_1_2 , a : int = 5_1_2 , a : int = 1_0_0 , a : float = 4.0 , a : float = 0.3 , a : int = 1 , a : Optional[Union[torch.Generator, List[torch.Generator]]] = None , a : Optional[str] = "pil" , a : bool = True , ):
'''simple docstring'''
lowercase_ : Optional[int] = self._execution_device
lowercase_ : Dict = guidance_scale > 1.0
if isinstance(a , a ):
lowercase_ : Dict = torch.cat(a , dim=0 )
lowercase_ : Dict = image_embeds.shape[0]
if isinstance(a , a ):
lowercase_ : str = torch.cat(a , dim=0 )
if do_classifier_free_guidance:
lowercase_ : Optional[Any] = image_embeds.repeat_interleave(a , dim=0 )
lowercase_ : int = negative_image_embeds.repeat_interleave(a , dim=0 )
lowercase_ : int = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=a )
if not isinstance(a , a ):
lowercase_ : List[Any] = [image]
if not all(isinstance(a , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
f"""Input is in incorrect format: {[type(a ) for i in image]}. Currently, we only support PIL image and pytorch tensor""" )
lowercase_ : List[Any] = torch.cat([prepare_image(a , a , a ) for i in image] , dim=0 )
lowercase_ : List[Any] = image.to(dtype=image_embeds.dtype , device=a )
lowercase_ : Optional[int] = self.movq.encode(a )["latents"]
lowercase_ : Dict = latents.repeat_interleave(a , dim=0 )
self.scheduler.set_timesteps(a , device=a )
lowercase_ , lowercase_ : List[Any] = self.get_timesteps(a , a , a )
lowercase_ : Tuple = timesteps[:1].repeat(batch_size * num_images_per_prompt )
lowercase_ , lowercase_ : Optional[Any] = downscale_height_and_width(a , a , self.movq_scale_factor )
lowercase_ : Tuple = self.prepare_latents(
a , a , a , a , image_embeds.dtype , a , a )
for i, t in enumerate(self.progress_bar(a ) ):
# expand the latents if we are doing classifier free guidance
lowercase_ : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase_ : int = {"image_embeds": image_embeds}
lowercase_ : Optional[int] = self.unet(
sample=a , timestep=a , encoder_hidden_states=a , added_cond_kwargs=a , return_dict=a , )[0]
if do_classifier_free_guidance:
lowercase_ , lowercase_ : List[Any] = noise_pred.split(latents.shape[1] , dim=1 )
lowercase_ , lowercase_ : int = noise_pred.chunk(2 )
lowercase_ , lowercase_ : Any = variance_pred.chunk(2 )
lowercase_ : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowercase_ : Optional[Any] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowercase_ , lowercase_ : List[Any] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowercase_ : Dict = self.scheduler.step(
a , a , a , generator=a , )[0]
# post-processing
lowercase_ : Optional[Any] = self.movq.decode(a , force_not_quantize=a )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
lowercase_ : Tuple = image * 0.5 + 0.5
lowercase_ : Any = image.clamp(0 , 1 )
lowercase_ : Any = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowercase_ : Tuple = self.numpy_to_pil(a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a )
| 640
| 0
|
'''simple docstring'''
import json
import sys
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
with open(_UpperCamelCase , encoding="utf-8" ) as f:
lowercase_ : List[Any] = json.load(_UpperCamelCase )
lowercase_ : List[str] = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]
for benchmark_name in sorted(_UpperCamelCase ):
lowercase_ : str = results[benchmark_name]
lowercase_ : Union[str, Any] = benchmark_name.split("/" )[-1]
output_md.append(F"""### Benchmark: {benchmark_file_name}""" )
lowercase_ : Optional[int] = "| metric |"
lowercase_ : Any = "|--------|"
lowercase_ : int = "| new / old (diff) |"
for metric_name in sorted(_UpperCamelCase ):
lowercase_ : Optional[Any] = benchmark_res[metric_name]
lowercase_ : Optional[Any] = metric_vals["new"]
lowercase_ : str = metric_vals.get("old" , _UpperCamelCase )
lowercase_ : List[Any] = metric_vals.get("diff" , _UpperCamelCase )
lowercase_ : str = F""" {new_val:f}""" if isinstance(_UpperCamelCase , (int, float) ) else "None"
if old_val is not None:
val_str += F""" / {old_val:f}""" if isinstance(_UpperCamelCase , (int, float) ) else "None"
if dif_val is not None:
val_str += F""" ({dif_val:f})""" if isinstance(_UpperCamelCase , (int, float) ) else "None"
title += " " + metric_name + " |"
lines += "---|"
value += val_str + " |"
output_md += [title, lines, value, " "]
output_md.append("</details>" )
with open(_UpperCamelCase , "w" , encoding="utf-8" ) as f:
f.writelines("\n".join(_UpperCamelCase ) )
if __name__ == "__main__":
UpperCamelCase__ = sys.argv[1]
UpperCamelCase__ = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
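# A hedged illustration of the expected JSON layout (hypothetical values, not
# taken from the source): each benchmark file maps metric names to
# new/old/diff entries, e.g.
#
#   {"benchmarks/sample.json": {"latency_ms": {"new": 1.2, "old": 1.5, "diff": -0.3}}}
#
# which renders a section "### Benchmark: sample.json" containing a row like
#   | new / old (diff) | 1.200000 / 1.500000 (-0.300000) |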
| 708
|
'''simple docstring'''
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: str = ['image_processor', 'tokenizer']
__lowerCamelCase: Dict = 'Pix2StructImageProcessor'
__lowerCamelCase: Union[str, Any] = ('T5Tokenizer', 'T5TokenizerFast')
def __init__( self : str , a : Dict , a : List[str] ):
'''simple docstring'''
lowercase_ : Optional[Any] = False
super().__init__(a , a )
def __call__( self : Tuple , a : int=None , a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , a : bool = True , a : Union[bool, str, PaddingStrategy] = False , a : Union[bool, str, TruncationStrategy] = None , a : Optional[int] = None , a : Optional[int] = 2_0_4_8 , a : int = 0 , a : Optional[int] = None , a : Optional[bool] = None , a : bool = False , a : bool = False , a : bool = False , a : bool = False , a : bool = False , a : bool = True , a : Optional[Union[str, TensorType]] = None , **a : List[str] , ):
'''simple docstring'''
if images is None and text is None:
raise ValueError("You have to specify either images or text." )
# Get only text
if images is None and not self.image_processor.is_vqa:
lowercase_ : Dict = self.tokenizer
lowercase_ : Tuple = self.tokenizer(
text=a , add_special_tokens=a , padding=a , truncation=a , max_length=a , stride=a , pad_to_multiple_of=a , return_attention_mask=a , return_overflowing_tokens=a , return_special_tokens_mask=a , return_offsets_mapping=a , return_token_type_ids=a , return_length=a , verbose=a , return_tensors=a , **a , )
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
lowercase_ : Optional[int] = self.image_processor(
a , return_tensors=a , max_patches=a , **a )
else:
# add pixel_values and bbox
lowercase_ : Any = self.image_processor(
a , return_tensors=a , max_patches=a , header_text=a , **a )
if text is not None and not self.image_processor.is_vqa:
lowercase_ : int = self.tokenizer(
text=a , add_special_tokens=a , padding=a , truncation=a , max_length=a , stride=a , pad_to_multiple_of=a , return_attention_mask=a , return_overflowing_tokens=a , return_special_tokens_mask=a , return_offsets_mapping=a , return_token_type_ids=a , return_length=a , verbose=a , return_tensors=a , **a , )
if "attention_mask" in text_encoding:
lowercase_ : str = text_encoding.pop("attention_mask" )
if "input_ids" in text_encoding:
lowercase_ : Dict = text_encoding.pop("input_ids" )
else:
lowercase_ : str = None
if text_encoding is not None:
encoding_image_processor.update(a )
return encoding_image_processor
def lowerCAmelCase__ ( self : Any , *a : str , **a : Tuple ):
'''simple docstring'''
return self.tokenizer.batch_decode(*a , **a )
def lowerCAmelCase__ ( self : str , *a : Optional[int] , **a : Any ):
'''simple docstring'''
return self.tokenizer.decode(*a , **a )
@property
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
lowercase_ : Tuple = self.tokenizer.model_input_names
lowercase_ : Dict = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
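# A hedged usage sketch (the checkpoint name is illustrative, and the merged
# key names follow the pops above rather than being shown explicitly here):
#
#   processor = Pix2StructProcessor.from_pretrained("google/pix2struct-base")  # hypothetical checkpoint
#   enc = processor(images=image, text="A caption", return_tensors="pt")
#   # `enc` holds the image-processor outputs, with the tokenizer's input_ids
#   # and attention_mask merged in as decoder inputs for non-VQA checkpoints.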
| 640
| 0
|
'''simple docstring'''
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
UpperCamelCase__ = ['text', 'image', 'audio']
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : List[Any] = []
for input_type in input_types:
if input_type == "text":
inputs.append("Text input" )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png" ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3000 ) )
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
inputs.append(create_inputs(_UpperCamelCase ) )
else:
raise ValueError(F"""Invalid type requested: {input_type}""" )
return inputs
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Optional[int] = []
for output in outputs:
if isinstance(_UpperCamelCase , (str, AgentText) ):
output_types.append("text" )
elif isinstance(_UpperCamelCase , (Image.Image, AgentImage) ):
output_types.append("image" )
elif isinstance(_UpperCamelCase , (torch.Tensor, AgentAudio) ):
output_types.append("audio" )
else:
raise ValueError(F"""Invalid output: {output}""" )
return output_types
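# Worked examples of the two helpers above, following the branches in the code:
#
#   create_inputs(["text", "audio"])            # -> ["Text input", torch.ones(3000)]
#   output_types(["hello", torch.ones(3000)])   # -> ["text", "audio"]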
@is_tool_test
class _UpperCAmelCase :
def lowerCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
self.assertTrue(hasattr(self.tool , "inputs" ) )
self.assertTrue(hasattr(self.tool , "outputs" ) )
lowercase_ : Optional[Any] = self.tool.inputs
for _input in inputs:
if isinstance(_input , a ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
lowercase_ : Any = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ : List[str] = create_inputs(self.tool.inputs )
lowercase_ : List[str] = self.tool(*a )
# There is a single output
if len(self.tool.outputs ) == 1:
lowercase_ : Union[str, Any] = [outputs]
self.assertListEqual(output_types(a ) , self.tool.outputs )
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
self.assertTrue(hasattr(self.tool , "description" ) )
self.assertTrue(hasattr(self.tool , "default_checkpoint" ) )
self.assertTrue(self.tool.description.startswith("This is a tool that" ) )
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ : Any = create_inputs(self.tool.inputs )
lowercase_ : str = self.tool(*a )
if not isinstance(a , a ):
lowercase_ : List[Any] = [outputs]
self.assertEqual(len(a ) , len(self.tool.outputs ) )
for output, output_type in zip(a , self.tool.outputs ):
lowercase_ : int = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(a , a ) )
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
lowercase_ : Dict = create_inputs(self.tool.inputs )
lowercase_ : Optional[int] = []
for _input, input_type in zip(a , self.tool.inputs ):
if isinstance(a , a ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
lowercase_ : Any = self.tool(*a )
if not isinstance(a , a ):
lowercase_ : Any = [outputs]
self.assertEqual(len(a ) , len(self.tool.outputs ) )
| 709
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _UpperCAmelCase ( snake_case , unittest.TestCase ):
__lowerCamelCase: Dict = KandinskyVaaPriorPipeline
__lowerCamelCase: Optional[int] = ['prompt']
__lowerCamelCase: Any = ['prompt', 'negative_prompt']
__lowerCamelCase: List[Any] = [
'num_images_per_prompt',
'generator',
'num_inference_steps',
'latents',
'negative_prompt',
'guidance_scale',
'output_type',
'return_dict',
]
__lowerCamelCase: List[Any] = False
@property
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
return 3_2
@property
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
return 3_2
@property
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
return self.time_input_dim
@property
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def lowerCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
return 1_0_0
@property
def lowerCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
lowercase_ : Any = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
return tokenizer
@property
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModelWithProjection(a )
@property
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ : List[str] = {
"num_attention_heads": 2,
"attention_head_dim": 1_2,
"embedding_dim": self.text_embedder_hidden_size,
"num_layers": 1,
}
lowercase_ : Union[str, Any] = PriorTransformer(**a )
        # clip_std and clip_mean are initialized to 0, so PriorTransformer.post_process_latents would always return 0 - set clip_std to 1 so it doesn't
lowercase_ : List[Any] = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ : Dict = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=2_2_4 , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1_4 , )
lowercase_ : Optional[Any] = CLIPVisionModelWithProjection(a )
return model
@property
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
lowercase_ : List[str] = CLIPImageProcessor(
crop_size=2_2_4 , do_center_crop=a , do_normalize=a , do_resize=a , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=2_2_4 , )
return image_processor
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
lowercase_ : Any = self.dummy_prior
lowercase_ : Optional[Any] = self.dummy_image_encoder
lowercase_ : List[Any] = self.dummy_text_encoder
lowercase_ : Any = self.dummy_tokenizer
lowercase_ : Optional[Any] = self.dummy_image_processor
lowercase_ : List[str] = UnCLIPScheduler(
variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1_0_0_0 , clip_sample=a , clip_sample_range=10.0 , )
lowercase_ : List[Any] = {
"prior": prior,
"image_encoder": image_encoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"scheduler": scheduler,
"image_processor": image_processor,
}
return components
def lowerCAmelCase__ ( self : Any , a : Dict , a : Dict=0 ):
'''simple docstring'''
if str(a ).startswith("mps" ):
lowercase_ : int = torch.manual_seed(a )
else:
lowercase_ : Optional[Any] = torch.Generator(device=a ).manual_seed(a )
lowercase_ : Any = {
"prompt": "horse",
"generator": generator,
"guidance_scale": 4.0,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
def lowerCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ : str = "cpu"
lowercase_ : Any = self.get_dummy_components()
lowercase_ : int = self.pipeline_class(**a )
lowercase_ : Any = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowercase_ : Any = pipe(**self.get_dummy_inputs(a ) )
lowercase_ : List[Any] = output.image_embeds
lowercase_ : str = pipe(
**self.get_dummy_inputs(a ) , return_dict=a , )[0]
lowercase_ : Any = image[0, -1_0:]
lowercase_ : Dict = image_from_tuple[0, -1_0:]
assert image.shape == (1, 3_2)
lowercase_ : int = np.array(
[-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ : int = torch_device == "cpu"
lowercase_ : Tuple = True
lowercase_ : str = False
self._test_inference_batch_single_identical(
test_max_difference=a , relax_max_difference=a , test_mean_pixel_difference=a , )
@skip_mps
def lowerCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ : Any = torch_device == "cpu"
lowercase_ : int = False
self._test_attention_slicing_forward_pass(
test_max_difference=a , test_mean_pixel_difference=a , )
| 640
| 0
|
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : List[Any] = 1
lowercase_ : Union[str, Any] = 2
while i * i <= n:
lowercase_ : int = 0
while n % i == 0:
n //= i
multiplicity += 1
n_divisors *= multiplicity + 1
i += 1
if n > 1:
n_divisors *= 2
return n_divisors
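# Worked example: 28 = 2**2 * 7, so the divisor count is (2 + 1) * (1 + 1) = 6,
# matching the six divisors {1, 2, 4, 7, 14, 28}.
#
#   count_divisors(28)  # -> 6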
def __SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
lowercase_ : Optional[int] = 1
lowercase_ : Any = 1
while True:
i += 1
t_num += i
if count_divisors(_UpperCamelCase ) > 500:
break
return t_num
if __name__ == "__main__":
print(solution())
| 710
|
'''simple docstring'''
from __future__ import annotations
UpperCamelCase__ = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
UpperCamelCase__ = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : str = []
lowercase_ : List[str] = len(_UpperCamelCase )
for i in range(_UpperCamelCase ):
lowercase_ : float = -1
for j in range(i + 1 , _UpperCamelCase ):
if arr[i] < arr[j]:
lowercase_ : Union[str, Any] = arr[j]
break
result.append(_UpperCamelCase )
return result
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : List[str] = []
for i, outer in enumerate(_UpperCamelCase ):
lowercase_ : float = -1
for inner in arr[i + 1 :]:
if outer < inner:
lowercase_ : Optional[Any] = inner
break
result.append(_UpperCamelCase )
return result
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : List[str] = len(_UpperCamelCase )
lowercase_ : list[float] = []
lowercase_ : list[float] = [-1] * arr_size
for index in reversed(range(_UpperCamelCase ) ):
if stack:
while stack[-1] <= arr[index]:
stack.pop()
if not stack:
break
if stack:
lowercase_ : Optional[Any] = stack[-1]
stack.append(arr[index] )
return result
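# A short trace of the stack-based version on [2, 1, 3], scanning right to left:
#   index 2 (value 3): stack empty               -> result[2] = -1, push 3
#   index 1 (value 1): top 3 > 1                 -> result[1] = 3,  push 1
#   index 0 (value 2): pop 1 (<= 2), top 3 > 2   -> result[0] = 3,  push 2
# giving [3, 3, -1]; each element is pushed and popped at most once, so this
# pass is O(n) overall versus the O(n**2) double loops above.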
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
UpperCamelCase__ = (
'from __main__ import arr, next_greatest_element_slow, '
'next_greatest_element_fast, next_greatest_element'
)
print(
'next_greatest_element_slow():',
timeit('next_greatest_element_slow(arr)', setup=setup),
)
print(
'next_greatest_element_fast():',
timeit('next_greatest_element_fast(arr)', setup=setup),
)
print(
' next_greatest_element():',
timeit('next_greatest_element(arr)', setup=setup),
)
| 640
| 0
|
'''simple docstring'''
UpperCamelCase__ = 'Tobias Carryer'
from time import time
class _UpperCAmelCase :
def __init__( self : Optional[int] , a : int , a : Union[str, Any] , a : int , a : Optional[Any]=int(time() ) ): # noqa: B008
'''simple docstring'''
lowercase_ : Tuple = multiplier
lowercase_ : Union[str, Any] = increment
lowercase_ : Any = modulo
lowercase_ : str = seed
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
lowercase_ : Tuple = (self.multiplier * self.seed + self.increment) % self.modulo
return self.seed
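# Worked example of one LCG step with the parameters used below
# (multiplier 1664525, increment 1013904223, modulo 2 << 31 == 2**32):
# from seed 0 the first output is (1664525 * 0 + 1013904223) % 2**32
# == 1013904223, and that value becomes the seed for the next step.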
if __name__ == "__main__":
# Show the LCG in action.
UpperCamelCase__ = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31)
while True:
print(lcg.next_number())
| 711
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json',
}
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: List[Any] = 'gpt_neox_japanese'
def __init__( self : List[str] , a : List[Any]=3_2_0_0_0 , a : Union[str, Any]=2_5_6_0 , a : Optional[Any]=3_2 , a : Any=3_2 , a : str=4 , a : Optional[int]="gelu" , a : Optional[Any]=1.00 , a : Dict=1_0_0_0_0 , a : List[Any]=2_0_4_8 , a : Dict=0.02 , a : int=1e-5 , a : Optional[int]=True , a : Union[str, Any]=3_1_9_9_6 , a : List[Any]=3_1_9_9_9 , a : List[str]=0.1 , a : Dict=0.0 , **a : Union[str, Any] , ):
'''simple docstring'''
super().__init__(bos_token_id=a , eos_token_id=a , **a )
lowercase_ : int = vocab_size
lowercase_ : int = max_position_embeddings
lowercase_ : List[str] = hidden_size
lowercase_ : Any = num_hidden_layers
lowercase_ : Any = num_attention_heads
lowercase_ : List[Any] = intermediate_multiple_size
lowercase_ : List[str] = hidden_act
lowercase_ : Optional[int] = rotary_pct
lowercase_ : Tuple = rotary_emb_base
lowercase_ : Optional[Any] = initializer_range
lowercase_ : List[str] = layer_norm_eps
lowercase_ : List[str] = use_cache
lowercase_ : Any = attention_dropout
lowercase_ : List[Any] = hidden_dropout
| 640
| 0
|
'''simple docstring'''
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: List[Any] = 'naver-clova-ix/donut-base-finetuned-docvqa'
__lowerCamelCase: Any = (
        'This is a tool that answers a question about a document (pdf). It takes an input named `document` which '
'should be the document containing the information, as well as a `question` that is the question about the '
'document. It returns a text that contains the answer to the question.'
)
__lowerCamelCase: Tuple = 'document_qa'
__lowerCamelCase: List[Any] = AutoProcessor
__lowerCamelCase: str = VisionEncoderDecoderModel
__lowerCamelCase: List[Any] = ['image', 'text']
__lowerCamelCase: Optional[int] = ['text']
def __init__( self : Union[str, Any] , *a : List[Any] , **a : Tuple ):
'''simple docstring'''
if not is_vision_available():
raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool." )
super().__init__(*a , **a )
def lowerCAmelCase__ ( self : Union[str, Any] , a : "Image" , a : str ):
'''simple docstring'''
lowercase_ : Any = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
lowercase_ : int = task_prompt.replace("{user_input}" , a )
lowercase_ : List[str] = self.pre_processor.tokenizer(
a , add_special_tokens=a , return_tensors="pt" ).input_ids
lowercase_ : Union[str, Any] = self.pre_processor(a , return_tensors="pt" ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def lowerCAmelCase__ ( self : Any , a : List[Any] ):
'''simple docstring'''
return self.model.generate(
inputs["pixel_values"].to(self.device ) , decoder_input_ids=inputs["decoder_input_ids"].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=a , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=a , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=a , ).sequences
def lowerCAmelCase__ ( self : Any , a : Union[str, Any] ):
'''simple docstring'''
lowercase_ : Union[str, Any] = self.pre_processor.batch_decode(a )[0]
lowercase_ : Optional[int] = sequence.replace(self.pre_processor.tokenizer.eos_token , "" )
lowercase_ : Optional[int] = sequence.replace(self.pre_processor.tokenizer.pad_token , "" )
lowercase_ : List[Any] = re.sub(R"<.*?>" , "" , a , count=1 ).strip() # remove first task start token
lowercase_ : Union[str, Any] = self.pre_processor.tokenajson(a )
return sequence["answer"]
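# A hedged usage sketch (the class name below is a hypothetical stand-in for
# the obfuscated class above; per its declared inputs, the tool takes an image
# and a question string and returns the answer text):
#
#   tool = DocumentQuestionAnsweringTool()   # hypothetical local name
#   answer = tool(document_image, "What is the invoice total?")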
| 712
|
'''simple docstring'''
import heapq as hq
import math
from collections.abc import Iterator
class _UpperCAmelCase :
def __init__( self : Optional[Any] , a : Any ):
'''simple docstring'''
lowercase_ : List[Any] = str(id_ )
lowercase_ : List[str] = None
lowercase_ : Tuple = None
lowercase_ : Dict = []
lowercase_ : Union[str, Any] = {} # {vertex:distance}
def __lt__( self : Optional[Any] , a : int ):
'''simple docstring'''
return self.key < other.key
def __repr__( self : Union[str, Any] ):
'''simple docstring'''
return self.id
def lowerCAmelCase__ ( self : Union[str, Any] , a : Optional[int] ):
'''simple docstring'''
self.neighbors.append(a )
def lowerCAmelCase__ ( self : Dict , a : int , a : Optional[int] ):
'''simple docstring'''
lowercase_ : int = weight
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , _UpperCamelCase )
graph[b - 1].add_edge(graph[a - 1] , _UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Any = []
for u in graph:
lowercase_ : List[Any] = math.inf
lowercase_ : str = None
lowercase_ : Tuple = 0
lowercase_ : Tuple = graph[:]
while q:
lowercase_ : List[Any] = min(_UpperCamelCase )
q.remove(_UpperCamelCase )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
lowercase_ : Optional[int] = u
lowercase_ : Union[str, Any] = u.edges[v.id]
for i in range(1 , len(_UpperCamelCase ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
for u in graph:
lowercase_ : str = math.inf
lowercase_ : int = None
lowercase_ : List[Any] = 0
lowercase_ : str = list(_UpperCamelCase )
hq.heapify(_UpperCamelCase )
while h:
lowercase_ : List[Any] = hq.heappop(_UpperCamelCase )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
lowercase_ : str = u
lowercase_ : Optional[int] = u.edges[v.id]
hq.heapify(_UpperCamelCase )
for i in range(1 , len(_UpperCamelCase ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
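# A hedged usage sketch, assuming the obfuscated names above stand for the
# upstream `Vertex`, `connect`, `prim`, and `prim_heap`:
#
#   graph = [Vertex(i) for i in range(1, 4)]
#   connect(graph, 1, 2, 5)   # undirected edge between vertices 1 and 2, weight 5
#   connect(graph, 2, 3, 2)
#   mst = prim(graph[:], graph[0])                 # (child, parent) pairs, 1-indexed
#   mst_lazy = list(prim_heap(graph[:], graph[0]))  # same edges via a heap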
def __SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 640
| 0
|
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class _UpperCAmelCase :
def __init__( self : Dict , a : str , a : str=2 , a : Optional[Any]=3_2 , a : int=1_6 , a : Dict=3 , a : Tuple=True , a : Any=True , a : List[Any]=3_2 , a : Optional[int]=4 , a : str=[0, 1, 2, 3] , a : Optional[int]=4 , a : List[Any]=3_7 , a : Dict="gelu" , a : Optional[int]=0.1 , a : Dict=0.1 , a : Optional[Any]=0.02 , a : List[Any]=3 , a : int=[1, 3_8_4, 2_4, 2_4] , a : Any=True , a : Union[str, Any]=None , ):
'''simple docstring'''
lowercase_ : Dict = parent
lowercase_ : Any = batch_size
lowercase_ : Optional[int] = image_size
lowercase_ : Dict = patch_size
lowercase_ : int = num_channels
lowercase_ : int = is_training
lowercase_ : List[str] = use_labels
lowercase_ : Optional[Any] = hidden_size
lowercase_ : Optional[Any] = num_hidden_layers
lowercase_ : Any = backbone_out_indices
lowercase_ : int = num_attention_heads
lowercase_ : List[Any] = intermediate_size
lowercase_ : List[str] = hidden_act
lowercase_ : str = hidden_dropout_prob
lowercase_ : List[Any] = attention_probs_dropout_prob
lowercase_ : List[Any] = initializer_range
lowercase_ : Optional[int] = num_labels
lowercase_ : Tuple = backbone_featmap_shape
lowercase_ : List[str] = scope
lowercase_ : Optional[int] = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
lowercase_ : Optional[int] = (image_size // patch_size) ** 2
lowercase_ : List[str] = num_patches + 1
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
lowercase_ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase_ : str = None
if self.use_labels:
lowercase_ : Tuple = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
lowercase_ : int = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ : Tuple = {
"global_padding": "same",
"layer_type": "bottleneck",
"depths": [3, 4, 9],
"out_features": ["stage1", "stage2", "stage3"],
"embedding_dynamic_padding": True,
"hidden_sizes": [9_6, 1_9_2, 3_8_4, 7_6_8],
"num_groups": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=a , backbone_featmap_shape=self.backbone_featmap_shape , )
def lowerCAmelCase__ ( self : int , a : Optional[int] , a : Optional[Any] , a : Tuple ):
'''simple docstring'''
lowercase_ : Union[str, Any] = DPTModel(config=a )
model.to(a )
model.eval()
lowercase_ : int = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self : Tuple , a : Any , a : Union[str, Any] , a : str ):
'''simple docstring'''
lowercase_ : Tuple = self.num_labels
lowercase_ : List[Any] = DPTForDepthEstimation(a )
model.to(a )
model.eval()
lowercase_ : Any = model(a )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def lowerCAmelCase__ ( self : Union[str, Any] , a : Dict , a : Union[str, Any] , a : int ):
'''simple docstring'''
lowercase_ : Optional[int] = self.num_labels
lowercase_ : Any = DPTForSemanticSegmentation(a )
model.to(a )
model.eval()
lowercase_ : Tuple = model(a , labels=a )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
lowercase_ : List[str] = self.prepare_config_and_inputs()
lowercase_ : Optional[Any] = config_and_inputs
lowercase_ : List[Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( snake_case , snake_case , unittest.TestCase ):
__lowerCamelCase: Optional[Any] = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
__lowerCamelCase: Optional[int] = (
{
'depth-estimation': DPTForDepthEstimation,
'feature-extraction': DPTModel,
'image-segmentation': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__lowerCamelCase: Tuple = False
__lowerCamelCase: Tuple = False
__lowerCamelCase: List[Any] = False
def lowerCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ : List[Any] = DPTModelTester(self )
lowercase_ : str = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=3_7 )
def lowerCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="DPT does not use inputs_embeds" )
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
pass
def lowerCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : Tuple = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase_ : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , nn.Linear ) )
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : Optional[Any] = model_class(a )
lowercase_ : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ : Tuple = [*signature.parameters.keys()]
lowercase_ : List[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , a )
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*a )
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
lowercase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*a )
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
lowercase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : List[Any] = True
if model_class in get_values(a ):
continue
lowercase_ : Union[str, Any] = model_class(a )
model.to(a )
model.train()
lowercase_ : Any = self._prepare_for_class(a , a , return_labels=a )
lowercase_ : str = model(**a ).loss
loss.backward()
def lowerCAmelCase__ ( self : Dict ):
'''simple docstring'''
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : Optional[int] = False
lowercase_ : List[str] = True
if model_class in get_values(a ) or not model_class.supports_gradient_checkpointing:
continue
lowercase_ : Dict = model_class(a )
model.to(a )
model.gradient_checkpointing_enable()
model.train()
lowercase_ : Tuple = self._prepare_for_class(a , a , return_labels=a )
lowercase_ : List[str] = model(**a ).loss
loss.backward()
def lowerCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : Dict = _config_zero_init(a )
for model_class in self.all_model_classes:
lowercase_ : str = model_class(config=a )
# Skip the check for the backbone
lowercase_ : Any = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
lowercase_ : Dict = [f"""{name}.{key}""" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
pass
@slow
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
lowercase_ : str = DPTModel.from_pretrained(a )
self.assertIsNotNone(a )
def lowerCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : str = "add"
with self.assertRaises(a ):
lowercase_ : Optional[Any] = DPTForDepthEstimation(a )
def __SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
lowercase_ : List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
@slow
class _UpperCAmelCase ( unittest.TestCase ):
def lowerCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
lowercase_ : Union[str, Any] = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas" )
lowercase_ : Optional[Any] = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas" ).to(a )
lowercase_ : Optional[int] = prepare_img()
lowercase_ : List[str] = image_processor(images=a , return_tensors="pt" ).to(a )
# forward pass
with torch.no_grad():
lowercase_ : Optional[int] = model(**a )
lowercase_ : Tuple = outputs.predicted_depth
# verify the predicted depth
lowercase_ : List[Any] = torch.Size((1, 3_8_4, 3_8_4) )
self.assertEqual(predicted_depth.shape , a )
lowercase_ : int = torch.tensor(
[[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(a )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 1_0_0 , a , atol=1e-4 ) )
| 713
|
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Any = False
while is_sorted is False: # Until all the indices are traversed keep looping
lowercase_ : List[str] = True
for i in range(0 , len(_UpperCamelCase ) - 1 , 2 ): # iterating over all even indices
if input_list[i] > input_list[i + 1]:
lowercase_ , lowercase_ : Union[str, Any] = input_list[i + 1], input_list[i]
# swapping if elements not in order
lowercase_ : Any = False
for i in range(1 , len(_UpperCamelCase ) - 1 , 2 ): # iterating over all odd indices
if input_list[i] > input_list[i + 1]:
lowercase_ , lowercase_ : Tuple = input_list[i + 1], input_list[i]
# swapping if elements not in order
lowercase_ : List[Any] = False
return input_list
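# A short trace on [3, 1, 2]:
#   pass 1, even index 0: 3 > 1 -> swap -> [1, 3, 2]
#   pass 1, odd index 1:  3 > 2 -> swap -> [1, 2, 3]
#   pass 2 finds no out-of-order pair, so is_sorted stays True and the loop ends.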
if __name__ == "__main__":
print('Enter list to be sorted')
UpperCamelCase__ = [int(x) for x in input().split()]
# inputing elements of the list in one line
UpperCamelCase__ = odd_even_sort(input_list)
print('The sorted list is')
print(sorted_list)
| 640
| 0
|
'''simple docstring'''
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
'pipelines_utils',
'0.22.0',
'Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.',
standard_warn=False,
stacklevel=3,
)
| 714
|
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Dict = 0
lowercase_ : Optional[Any] = len(_UpperCamelCase ) # No of vertices in graph
lowercase_ : Union[str, Any] = [0] * n
lowercase_ : Optional[int] = [False] * n
def dfs(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
lowercase_ : Union[str, Any] = True
lowercase_ : Dict = id_
id_ += 1
for to in graph[at]:
if to == parent:
pass
elif not visited[to]:
dfs(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , id_ )
lowercase_ : str = min(low[at] , low[to] )
if id_ <= low[to]:
bridges.append((at, to) if at < to else (to, at) )
else:
# This edge is a back edge and cannot be a bridge
lowercase_ : Optional[int] = min(low[at] , low[to] )
lowercase_ : list[tuple[int, int]] = []
for i in range(_UpperCamelCase ):
if not visited[i]:
dfs(_UpperCamelCase , -1 , _UpperCamelCase , id_ )
return bridges
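# Worked example on the first demo graph above: the triangle {0, 1, 2} and the
# cycle {5, 6, 7, 8} contain no bridges, so the bridge set is exactly
# {(2, 3), (3, 4), (2, 5)} (each tuple ordered with the smaller vertex first).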
if __name__ == "__main__":
import doctest
doctest.testmod()
| 640
| 0
|
'''simple docstring'''
import math
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
if (
not isinstance(_UpperCamelCase , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError("power_factor must be a valid float value between -1 and 1." )
return apparent_power * power_factor
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
if (
not isinstance(_UpperCamelCase , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError("power_factor must be a valid float value between -1 and 1." )
return apparent_power * math.sqrt(1 - power_factor**2 )
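# Worked examples, assuming the two functions compute real and reactive power
# from an apparent power and a power factor (their obfuscated names hide this):
#   real power:     100 * 0.9               == 90.0
#   reactive power: 100 * sqrt(1 - 0.9**2)  ~= 43.589   (sqrt(0.19) ~= 0.43589)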
if __name__ == "__main__":
import doctest
doctest.testmod()
| 715
|
'''simple docstring'''
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
UpperCamelCase__ = 'scheduler_config.json'
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: int = 1
__lowerCamelCase: List[Any] = 2
__lowerCamelCase: Optional[Any] = 3
__lowerCamelCase: int = 4
__lowerCamelCase: Optional[int] = 5
@dataclass
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: jnp.ndarray
class _UpperCAmelCase :
__lowerCamelCase: List[str] = SCHEDULER_CONFIG_NAME
__lowerCamelCase: Optional[int] = ['dtype']
__lowerCamelCase: int = []
__lowerCamelCase: Dict = True
@classmethod
def lowerCAmelCase__ ( cls : Tuple , a : Dict[str, Any] = None , a : Optional[str] = None , a : Union[str, Any]=False , **a : Union[str, Any] , ):
'''simple docstring'''
lowercase_ , lowercase_ : Any = cls.load_config(
pretrained_model_name_or_path=a , subfolder=a , return_unused_kwargs=a , **a , )
lowercase_ , lowercase_ : Union[str, Any] = cls.from_config(a , return_unused_kwargs=a , **a )
if hasattr(a , "create_state" ) and getattr(a , "has_state" , a ):
lowercase_ : Tuple = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def lowerCAmelCase__ ( self : int , a : Union[str, os.PathLike] , a : bool = False , **a : int ):
'''simple docstring'''
self.save_config(save_directory=a , push_to_hub=a , **a )
@property
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
return self._get_compatibles()
@classmethod
def lowerCAmelCase__ ( cls : Dict ):
'''simple docstring'''
lowercase_ : str = list(set([cls.__name__] + cls._compatibles ) )
lowercase_ : str = importlib.import_module(__name__.split("." )[0] )
lowercase_ : Optional[Any] = [
getattr(a , a ) for c in compatible_classes_str if hasattr(a , a )
]
return compatible_classes
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
assert len(_UpperCamelCase ) >= x.ndim
return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(_UpperCamelCase ) - x.ndim) ) , _UpperCamelCase )
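# Worked example of the broadcast helper above: with x of shape (4,) and a
# target shape (4, 3, 2), x is reshaped to (4, 1, 1) by appending singleton
# axes on the right, then broadcast to (4, 3, 2); the assert requires the
# target shape to have at least x.ndim dimensions.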
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase=0.999 , _UpperCamelCase=jnp.floataa ):
"""simple docstring"""
def alpha_bar(_UpperCamelCase ):
return math.cos((time_step + 0.008) / 1.008 * math.pi / 2 ) ** 2
lowercase_ : int = []
for i in range(_UpperCamelCase ):
lowercase_ : Union[str, Any] = i / num_diffusion_timesteps
lowercase_ : Dict = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(_UpperCamelCase ) / alpha_bar(_UpperCamelCase ) , _UpperCamelCase ) )
return jnp.array(_UpperCamelCase , dtype=_UpperCamelCase )
@flax.struct.dataclass
class _UpperCAmelCase :
__lowerCamelCase: jnp.ndarray
__lowerCamelCase: jnp.ndarray
__lowerCamelCase: jnp.ndarray
@classmethod
def lowerCAmelCase__ ( cls : Tuple , a : Optional[int] ):
'''simple docstring'''
lowercase_ : Any = scheduler.config
if config.trained_betas is not None:
lowercase_ : Union[str, Any] = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
elif config.beta_schedule == "linear":
lowercase_ : List[Any] = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
lowercase_ : Tuple = (
jnp.linspace(
config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
lowercase_ : Union[str, Any] = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
else:
raise NotImplementedError(
f"""beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}""" )
lowercase_ : str = 1.0 - betas
lowercase_ : Dict = jnp.cumprod(a , axis=0 )
return cls(
alphas=a , betas=a , alphas_cumprod=a , )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : List[str] = state.alphas_cumprod
lowercase_ : Optional[Any] = alphas_cumprod[timesteps] ** 0.5
lowercase_ : int = sqrt_alpha_prod.flatten()
lowercase_ : Union[str, Any] = broadcast_to_shape_from_left(_UpperCamelCase , original_samples.shape )
lowercase_ : Optional[int] = (1 - alphas_cumprod[timesteps]) ** 0.5
lowercase_ : Union[str, Any] = sqrt_one_minus_alpha_prod.flatten()
lowercase_ : Any = broadcast_to_shape_from_left(_UpperCamelCase , original_samples.shape )
return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ , lowercase_ : int = get_sqrt_alpha_prod(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
lowercase_ : str = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ , lowercase_ : int = get_sqrt_alpha_prod(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
lowercase_ : Any = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
return velocity
| 640
| 0