import numpy as np


def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Apply the sigmoid activation function 1 / (1 + e^-x) elementwise."""
    return 1 / (1 + np.exp(-vector))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
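# A quick sanity check (a sketch, not part of the original snippet): sigmoid maps 0 to
# 0.5 and saturates toward 0 and 1 for large negative and positive inputs.
#
#   >>> sigmoid(np.array([-1.0, 0.0, 1.0]))
#   array([0.26894142, 0.5       , 0.73105858])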
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V, such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. Equivalently, no edge connects two vertices of the
# same set.
def check_bipartite_dfs(graph: dict) -> bool:
    """Check whether a graph (given as an adjacency list) is bipartite, using DFS 2-coloring."""
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    # verify that no edge connects two vertices of the same color
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True


# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
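# A minimal counter-example (a sketch, not part of the original snippet): a triangle
# is an odd cycle, so it cannot be two-colored and the check returns False.
triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(triangle))  # False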
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "yjernite/retribert-base-uncased": (
        "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
    ),
}


class RetriBertConfig(PretrainedConfig):
    model_type = "retribert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=8,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        share_encoders=True,
        projection_dim=128,
        pad_token_id=0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
from __future__ import annotations


graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        """Perform a breadth-first search starting from the source vertex."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Return the shortest path from the source to ``target_vertex`` as "v1->v2->...";
        raises ValueError when no path exists."""
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breath_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))
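# Expected behaviour of the demo above (a note derived from the adjacency list, not in
# the original snippet): BFS from "G" reaches "D" via C, A and B, so the first call
# prints "G->C->A->B->D", the second prints "G", and the third raises ValueError
# because "Foo" is not a vertex of the graph.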
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_bigbird_pegasus": [
        "BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BigBirdPegasusConfig",
        "BigBirdPegasusOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # only expose the PyTorch modeling classes when torch is installed
    _import_structure["modeling_bigbird_pegasus"] = [
        "BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BigBirdPegasusForCausalLM",
        "BigBirdPegasusForConditionalGeneration",
        "BigBirdPegasusForQuestionAnswering",
        "BigBirdPegasusForSequenceClassification",
        "BigBirdPegasusModel",
        "BigBirdPegasusPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_bigbird_pegasus import (
        BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BigBirdPegasusConfig,
        BigBirdPegasusOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bigbird_pegasus import (
            BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
            BigBirdPegasusForCausalLM,
            BigBirdPegasusForConditionalGeneration,
            BigBirdPegasusForQuestionAnswering,
            BigBirdPegasusForSequenceClassification,
            BigBirdPegasusModel,
            BigBirdPegasusPreTrainedModel,
        )
else:
    import sys

    # replace this module with a lazy proxy so heavy submodules load on first access
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from math import ceil, sqrt


def solution(limit: int = 1_000_000) -> int:
    """
    Count the hollow square laminae that can be formed using up to ``limit`` tiles
    (Project Euler problem 173). A lamina with outer width w and hole width h uses
    w**2 - h**2 tiles, where w and h must have the same parity.
    """
    answer = 0

    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        # the hole width must have the same parity as the outer width
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1

        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1

    return answer


if __name__ == "__main__":
    print(f"{solution() = }")
import unittest

from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MPNetForMaskedLM,
        MPNetForMultipleChoice,
        MPNetForQuestionAnswering,
        MPNetForSequenceClassification,
        MPNetForTokenClassification,
        MPNetModel,
    )


class MPNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def get_large_model_config(self):
        return MPNetConfig.from_pretrained("microsoft/mpnet-base")

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MPNetConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_mpnet_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mpnet_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mpnet_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mpnet_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MPNetForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_mpnet_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class MPNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MPNetForMaskedLM,
            MPNetForMultipleChoice,
            MPNetForQuestionAnswering,
            MPNetForSequenceClassification,
            MPNetForTokenClassification,
            MPNetModel,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MPNetModel,
            "fill-mask": MPNetForMaskedLM,
            "question-answering": MPNetForQuestionAnswering,
            "text-classification": MPNetForSequenceClassification,
            "token-classification": MPNetForTokenClassification,
            "zero-shot": MPNetForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True

    def setUp(self):
        self.model_tester = MPNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MPNetConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mpnet_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs)


@require_torch
class MPNetModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MPNetModel.from_pretrained("microsoft/mpnet-base")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]]
        )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
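# A minimal standalone sketch of the slow integration check above (assumes network
# access to the "microsoft/mpnet-base" checkpoint; no names beyond those in the test):
#
#   model = MPNetModel.from_pretrained("microsoft/mpnet-base")
#   input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
#   with torch.no_grad():
#       last_hidden_state = model(input_ids)[0]
#   assert last_hidden_state.shape == (1, 11, 768)  # (batch, seq_len, hidden_size)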
import random
import unittest

import torch

from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device

from ..pipeline_params import (
    TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin


@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
    import torch


logger = logging.get_logger(__name__)
@dataclass
class BitsAndBytesConfig:
    """Configuration for loading models with bitsandbytes 8-bit / 4-bit quantization."""

    def __init__(
        self,
        load_in_8bit=False,
        load_in_4bit=False,
        llm_int8_threshold=6.0,
        llm_int8_skip_modules=None,
        llm_int8_enable_fp32_cpu_offload=False,
        llm_int8_has_fp16_weight=False,
        bnb_4bit_compute_dtype=None,
        bnb_4bit_quant_type="fp4",
        bnb_4bit_use_double_quant=False,
        **kwargs,
    ):
        self.load_in_8bit = load_in_8bit
        self.load_in_4bit = load_in_4bit
        self.llm_int8_threshold = llm_int8_threshold
        self.llm_int8_skip_modules = llm_int8_skip_modules
        self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload
        self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight
        self.bnb_4bit_quant_type = bnb_4bit_quant_type
        self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant

        if bnb_4bit_compute_dtype is None:
            self.bnb_4bit_compute_dtype = torch.float32
        elif isinstance(bnb_4bit_compute_dtype, str):
            self.bnb_4bit_compute_dtype = getattr(torch, bnb_4bit_compute_dtype)
        elif isinstance(bnb_4bit_compute_dtype, torch.dtype):
            self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype
        else:
            raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype")

        self.post_init()

    def post_init(self):
        """Safety checker that the arguments have valid types."""
        if not isinstance(self.llm_int8_threshold, float):
            raise ValueError("llm_int8_threshold must be a float")

        if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules, list):
            raise ValueError("llm_int8_skip_modules must be a list of strings")

        if not isinstance(self.llm_int8_enable_fp32_cpu_offload, bool):
            raise ValueError("llm_int8_enable_fp32_cpu_offload must be a boolean")

        if not isinstance(self.llm_int8_has_fp16_weight, bool):
            raise ValueError("llm_int8_has_fp16_weight must be a boolean")

        if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype, torch.dtype):
            raise ValueError("bnb_4bit_compute_dtype must be torch.dtype")

        if not isinstance(self.bnb_4bit_quant_type, str):
            raise ValueError("bnb_4bit_quant_type must be a string")

        if not isinstance(self.bnb_4bit_use_double_quant, bool):
            raise ValueError("bnb_4bit_use_double_quant must be a boolean")

        if self.load_in_4bit and not version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse(
            "0.39.0"
        ):
            raise ValueError(
                "4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version"
            )

    def is_quantizable(self):
        """Return True if either 8-bit or 4-bit loading is requested."""
        return self.load_in_8bit or self.load_in_4bit

    def quantization_method(self):
        """Return the quantization method in use, or None if not quantizable."""
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4":
            return "nf4"
        else:
            return None

    @classmethod
    def from_dict(cls, config_dict, return_unused_kwargs, **kwargs):
        config = cls(**config_dict)

        to_remove = []
        for key, value in kwargs.items():
            if hasattr(config, key):
                setattr(config, key, value)
                to_remove.append(key)
        for key in to_remove:
            kwargs.pop(key, None)

        if return_unused_kwargs:
            return config, kwargs
        else:
            return config

    def to_json_file(self, json_file_path: Union[str, os.PathLike]):
        with open(json_file_path, "w", encoding="utf-8") as writer:
            config_dict = self.to_dict()
            json_string = json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
            writer.write(json_string)

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["bnb_4bit_compute_dtype"] = str(output["bnb_4bit_compute_dtype"]).split(".")[1]
        return output

    def __repr__(self):
        return f"{self.__class__.__name__} {self.to_json_string()}"

    def to_json_string(self, use_diff: bool = True) -> str:
        if use_diff is True:
            config_dict = self.to_diff_dict()
        else:
            config_dict = self.to_dict()
        return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"

    def to_diff_dict(self) -> Dict[str, Any]:
        config_dict = self.to_dict()

        # get the default config dict
        default_config_dict = BitsAndBytesConfig().to_dict()

        serializable_config_dict = {}

        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                serializable_config_dict[key] = value

        return serializable_config_dict
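# A usage sketch for the config above (hedged: requires torch plus a recent
# bitsandbytes install, and the checkpoint name is only an example):
#
#   import torch
#   from transformers import AutoModelForCausalLM, BitsAndBytesConfig
#
#   quantization_config = BitsAndBytesConfig(
#       load_in_4bit=True,
#       bnb_4bit_quant_type="nf4",
#       bnb_4bit_compute_dtype=torch.bfloat16,
#   )
#   print(quantization_config.quantization_method())  # "nf4"
#   model = AutoModelForCausalLM.from_pretrained(
#       "facebook/opt-350m", quantization_config=quantization_config
#   )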
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    import torch

    from transformers.modeling_outputs import BaseModelOutput

    from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING


logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(
        self,
        image: Union[str, "Image.Image", List[Dict[str, Any]]],
        candidate_labels: Union[str, List[str]] = None,
        **kwargs,
    ):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")

        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")

        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")

        outputs = self.model(**model_inputs)

        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
            )[0]

            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])

                result = {"score": score, "label": label, "box": box}
                results.append(result)

        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]

        return results

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        """Turn a [xmin, ymin, xmax, ymax] tensor into a dict of int coordinates."""
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
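# A usage sketch for the pipeline above (hedged: the OWL-ViT checkpoint is just one
# example of a model supporting zero-shot detection; output keys follow postprocess()):
#
#   from transformers import pipeline
#
#   detector = pipeline(task="zero-shot-object-detection", model="google/owlvit-base-patch32")
#   predictions = detector(
#       "http://images.cocodataset.org/val2017/000000039769.jpg",
#       candidate_labels=["cat", "remote control"],
#   )
#   # each entry looks like {"score": ..., "label": ..., "box": {"xmin": ..., "ymin": ..., ...}}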
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
    # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}


class Data2VecAudioConfig(PretrainedConfig):
    model_type = "data2vec-audio"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embedding_groups=16,
        conv_pos_kernel_size=19,
        num_conv_pos_embeddings=5,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return math.prod(self.conv_stride)
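# A quick check of the property above (a sketch, not from the original file): with the
# default conv_stride of (5, 2, 2, 2, 2, 2, 2), each output frame of the feature
# extractor covers 5 * 2**6 = 320 raw audio samples:
#
#   >>> Data2VecAudioConfig().inputs_to_logits_ratio
#   320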
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile

from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available


if is_sentencepiece_available():
    from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"

if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"


@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["source_spm"])
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["target_spm"])

        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 9)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)

    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de")
        batch = en_de_tokenizer(["I am a small frog"], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(expected, batch.input_ids[0])

        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob("*")]
        self.assertIn("source.spm", contents)
        MarianTokenizer.from_pretrained(save_dir)

    def test_outputs_not_longer_than_maxlen(self):
        tok = self.get_tokenizer()

        batch = tok(
            ["I am a small frog" * 1000, "I am a small frog"], padding=True, truncation=True, return_tensors=FRAMEWORK
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))

    def test_outputs_can_be_shorter(self):
        tok = self.get_tokenizer()
        batch_smaller = tok(["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
__lowerCamelCase = {"""input_ids""": [[4_34_95, 4_62, 20, 4_21_64, 13_69, 52, 4_64, 1_32, 17_03, 4_92, 13, 74_91, 3_89_99, 6, 8, 4_64, 1_32, 17_03, 4_92, 13, 46_69, 3_78_67, 13, 75_25, 27, 15_93, 9_88, 13, 3_39_72, 70_29, 6, 20, 82_51, 3_83, 2, 2_70, 58_66, 37_88, 2, 23_53, 82_51, 1_23_38, 2, 1_39_58, 3_87, 2, 36_29, 69_53, 1_88, 29_00, 2, 1_39_58, 80_11, 1_15_01, 23, 84_60, 40_73, 3_40_09, 20, 4_35, 1_14_39, 27, 8, 84_60, 40_73, 60_04, 20, 99_88, 3_75, 27, 33, 2_66, 19_45, 10_76, 13_50, 3_78_67, 32_88, 5, 5_77, 10_76, 43_74, 8, 50_82, 5, 2_64_53, 2_57, 5_56, 4_03, 2, 2_42, 1_32, 3_83, 3_16, 4_92, 8, 1_07_67, 6, 3_16, 3_04, 42_39, 3, 0], [1_48, 1_57_22, 19, 18_39, 12, 13_50, 13, 2_23_27, 50_82, 54_18, 4_75_67, 3_59_38, 59, 3_18, 1_95_52, 1_08, 21_83, 54, 1_49_76, 48_35, 32, 5_47, 11_14, 8, 3_15, 24_17, 5, 92, 1_90_88, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00], [36, 63_95, 1_25_70, 3_91_47, 1_15_97, 6, 2_66, 4, 4_54_05, 72_96, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="Helsinki-NLP/opus-mt-en-de",
            revision="1a8c2263da11e68e50938f97e10cd57820bd504c",
            decode_kwargs={"use_source_tokenizer": True},
        )

    def test_tokenizer_integration_seperate_vocabs(self):
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")

        source_text = "Tämä on testi"
        target_text = "This is a test"

        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]

        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(expected_src_ids, src_ids)

        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(expected_target_ids, target_ids)

        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
},
'merges_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/longformer-base-4096": 4096,
    "allenai/longformer-large-4096": 4096,
    "allenai/longformer-large-4096-finetuned-triviaqa": 4096,
    "allenai/longformer-base-4096-extra.pos.embd.only": 4096,
    "allenai/longformer-large-4096-extra.pos.embd.only": 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """
    Return a mapping from utf-8 bytes to unicode strings, avoiding the whitespace and
    control characters that BPE vocabularies cannot contain.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
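# An illustration of the mapping above (a sketch, not part of the original file):
# printable bytes map to themselves, while e.g. the space byte (32) is shifted into an
# unused range and becomes "Ġ" (U+0120), which is why BPE vocab files show that glyph.
#
#   >>> byte_encoder = bytes_to_unicode()
#   >>> byte_encoder[ord("A")]
#   'A'
#   >>> byte_encoder[ord(" ")]
#   'Ġ'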
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
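# For example (a sketch, not part of the original file), the word ("h", "e", "l", "l", "o")
# yields the pairs {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}; bpe() below repeatedly
# picks the lowest-ranked of these pairs from self.bpe_ranks and merges it.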
class LongformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
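# A usage sketch for the tokenizer above (hedged: downloads the checkpoint on first use):
#
#   tokenizer = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
#   encoded = tokenizer("Hello world")
#   # encoded["input_ids"] starts with the <s> (cls) id and ends with the </s> (sep) id,
#   # per build_inputs_with_special_tokens above.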
import copy
import tempfile
import unittest

from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError

from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test


class GenerationConfigTest(unittest.TestCase):
    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config(self, config_name):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)

        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])

        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)

    def test_from_model_config(self):
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()

        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)

        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)

    def test_update(self):
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)

        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)

        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1024)

        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})

    def test_initialize_new_kwargs(self):
        generation_config = GenerationConfig()
        generation_config.foo = "bar"

        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)

            new_config = GenerationConfig.from_pretrained(tmp_dir)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo, "bar")

        generation_config = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config, "foo")  # no new kwargs should be initialized if from config

    def test_kwarg_init(self):
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)

        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)

        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)

        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value


@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-generation-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("test-generation-config", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
| 29
| 1
|
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
UpperCAmelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase):
@register_to_config
def __init__( self: Optional[Any] , UpperCamelCase_: bool , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: Optional[int] = None ):
super().__init__()
__lowerCamelCase = learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
__lowerCamelCase = torch.zeros(UpperCamelCase_ , UpperCamelCase_ )
else:
__lowerCamelCase = None
__lowerCamelCase = torch.nn.Parameter(UpperCamelCase_ )
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : VQModel
UpperCAmelCase__ : CLIPTextModel
UpperCAmelCase__ : CLIPTokenizer
UpperCAmelCase__ : Transformer2DModel
UpperCAmelCase__ : LearnedClassifierFreeSamplingEmbeddings
UpperCAmelCase__ : VQDiffusionScheduler
def __init__( self: Optional[int] , UpperCamelCase_: VQModel , UpperCamelCase_: CLIPTextModel , UpperCamelCase_: CLIPTokenizer , UpperCamelCase_: Transformer2DModel , UpperCamelCase_: VQDiffusionScheduler , UpperCamelCase_: LearnedClassifierFreeSamplingEmbeddings , ):
super().__init__()
self.register_modules(
vqvae=UpperCamelCase_ , transformer=UpperCamelCase_ , text_encoder=UpperCamelCase_ , tokenizer=UpperCamelCase_ , scheduler=UpperCamelCase_ , learned_classifier_free_sampling_embeddings=UpperCamelCase_ , )
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: Optional[int] , UpperCamelCase_: Dict , UpperCamelCase_: Union[str, Any] ):
__lowerCamelCase = len(UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else 1
# get prompt text embeddings
__lowerCamelCase = self.tokenizer(
UpperCamelCase_ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
__lowerCamelCase = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__lowerCamelCase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
F' {self.tokenizer.model_max_length} tokens: {removed_text}' )
__lowerCamelCase = text_input_ids[:, : self.tokenizer.model_max_length]
__lowerCamelCase = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
__lowerCamelCase = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=UpperCamelCase_ )
# duplicate text embeddings for each generation per prompt
__lowerCamelCase = prompt_embeds.repeat_interleave(UpperCamelCase_ , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
__lowerCamelCase = self.learned_classifier_free_sampling_embeddings.embeddings
__lowerCamelCase = negative_prompt_embeds.unsqueeze(0 ).repeat(UpperCamelCase_ , 1 , 1 )
else:
__lowerCamelCase = [""""""] * batch_size
__lowerCamelCase = text_input_ids.shape[-1]
__lowerCamelCase = self.tokenizer(
UpperCamelCase_ , padding="""max_length""" , max_length=UpperCamelCase_ , truncation=UpperCamelCase_ , return_tensors="""pt""" , )
__lowerCamelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
__lowerCamelCase = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=UpperCamelCase_ )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__lowerCamelCase = negative_prompt_embeds.shape[1]
__lowerCamelCase = negative_prompt_embeds.repeat(1 , UpperCamelCase_ , 1 )
__lowerCamelCase = negative_prompt_embeds.view(batch_size * num_images_per_prompt , UpperCamelCase_ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__lowerCamelCase = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self: int , UpperCamelCase_: Union[str, List[str]] , UpperCamelCase_: int = 1_00 , UpperCamelCase_: float = 5.0 , UpperCamelCase_: float = 1.0 , UpperCamelCase_: int = 1 , UpperCamelCase_: Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_: Optional[torch.FloatTensor] = None , UpperCamelCase_: Optional[str] = "pil" , UpperCamelCase_: bool = True , UpperCamelCase_: Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCamelCase_: int = 1 , ):
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = 1
elif isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = len(UpperCamelCase_ )
else:
raise ValueError(F'`prompt` has to be of type `str` or `list` but is {type(UpperCamelCase_ )}' )
__lowerCamelCase = batch_size * num_images_per_prompt
__lowerCamelCase = guidance_scale > 1.0
__lowerCamelCase = self._encode_prompt(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(UpperCamelCase_ , UpperCamelCase_ ) or callback_steps <= 0)
):
raise ValueError(
F'`callback_steps` has to be a positive integer but is {callback_steps} of type'
F' {type(UpperCamelCase_ )}.' )
# get the initial completely masked latents unless the user supplied it
__lowerCamelCase = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
__lowerCamelCase = self.transformer.num_vector_embeds - 1
__lowerCamelCase = torch.full(UpperCamelCase_ , UpperCamelCase_ ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
"""Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"""
F' {self.transformer.num_vector_embeds - 1} (inclusive).' )
__lowerCamelCase = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(UpperCamelCase_ , device=self.device )
__lowerCamelCase = self.scheduler.timesteps.to(self.device )
__lowerCamelCase = latents
for i, t in enumerate(self.progress_bar(UpperCamelCase_ ) ):
# expand the sample if we are doing classifier free guidance
__lowerCamelCase = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
__lowerCamelCase = self.transformer(UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , timestep=UpperCamelCase_ ).sample
if do_classifier_free_guidance:
__lowerCamelCase, __lowerCamelCase = model_output.chunk(2 )
__lowerCamelCase = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(UpperCamelCase_ , dim=1 , keepdim=UpperCamelCase_ )
__lowerCamelCase = self.truncate(UpperCamelCase_ , UpperCamelCase_ )
# remove `log(0)`'s (`-inf`s)
__lowerCamelCase = model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
__lowerCamelCase = self.scheduler.step(UpperCamelCase_ , timestep=UpperCamelCase_ , sample=UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = self.vqvae.config.vq_embed_dim
__lowerCamelCase = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
__lowerCamelCase = self.vqvae.quantize.get_codebook_entry(UpperCamelCase_ , shape=UpperCamelCase_ )
__lowerCamelCase = self.vqvae.decode(UpperCamelCase_ , force_not_quantize=UpperCamelCase_ ).sample
__lowerCamelCase = (image / 2 + 0.5).clamp(0 , 1 )
__lowerCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__lowerCamelCase = self.numpy_to_pil(UpperCamelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCamelCase_ )
def lowerCAmelCase__ ( self: str , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: float ):
__lowerCamelCase, __lowerCamelCase = torch.sort(UpperCamelCase_ , 1 , descending=UpperCamelCase_ )
__lowerCamelCase = torch.exp(UpperCamelCase_ )
__lowerCamelCase = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
__lowerCamelCase = torch.full_like(keep_mask[:, 0:1, :] , UpperCamelCase_ )
__lowerCamelCase = torch.cat((all_true, keep_mask) , dim=1 )
__lowerCamelCase = keep_mask[:, :-1, :]
__lowerCamelCase = keep_mask.gather(1 , indices.argsort(1 ) )
__lowerCamelCase = log_p_x_0.clone()
__lowerCamelCase = -torch.inf # -inf = log(0)
return rv
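# A minimal sketch of the truncation step above for a single 1-D row of
# log-probabilities (the batched gather/argsort bookkeeping is simplified):
# keep the most likely classes whose cumulative probability stays below
# `rate`, never drop the argmax, and set everything else to log(0) = -inf.
def _truncate_sketch(log_p: torch.Tensor, rate: float = 0.9) -> torch.Tensor:
    sorted_lp, idx = torch.sort(log_p, descending=True)
    keep = torch.exp(sorted_lp).cumsum(0) < rate
    # shift right by one so the largest probability is always kept
    keep = torch.cat([torch.ones(1, dtype=torch.bool), keep[:-1]])
    out = log_p.clone()
    out[idx[~keep]] = -torch.inf
    return out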
| 29
|
def lowerCamelCase__ ( A__ : list ):
'''simple docstring'''
for i in range(len(A__ ) - 1 , 0 , -1 ):
__lowerCamelCase = False
for j in range(A__ , 0 , -1 ):
if unsorted[j] < unsorted[j - 1]:
__lowerCamelCase, __lowerCamelCase = unsorted[j - 1], unsorted[j]
__lowerCamelCase = True
for j in range(A__ ):
if unsorted[j] > unsorted[j + 1]:
__lowerCamelCase, __lowerCamelCase = unsorted[j + 1], unsorted[j]
__lowerCamelCase = True
if not swapped:
break
return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ = input('Enter numbers separated by a comma:\n').strip()
UpperCAmelCase_ = [int(item) for item in user_input.split(',')]
print(f"""{cocktail_shaker_sort(unsorted) = }""")
| 29
| 1
|
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup
UpperCAmelCase_ = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582'
}
def lowerCamelCase__ ( A__ : str = "dhaka" , A__ : int = 5 ):
'''simple docstring'''
__lowerCamelCase = min(A__ , 50 ) # Prevent abuse!
__lowerCamelCase = {
"""q""": query,
"""tbm""": """isch""",
"""hl""": """en""",
"""ijn""": """0""",
}
__lowerCamelCase = requests.get("""https://www.google.com/search""" , params=A__ , headers=A__ )
__lowerCamelCase = BeautifulSoup(html.text , """html.parser""" )
__lowerCamelCase = """""".join(
re.findall(R"""AF_initDataCallback\(([^<]+)\);""" , str(soup.select("""script""" ) ) ) )
__lowerCamelCase = json.dumps(A__ )
__lowerCamelCase = json.loads(A__ )
__lowerCamelCase = re.findall(
R"""\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",""" , A__ , )
if not matched_google_image_data:
return 0
__lowerCamelCase = re.sub(
R"""\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]""" , """""" , str(A__ ) , )
__lowerCamelCase = re.findall(
R"""(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]""" , A__ , )
for index, fixed_full_res_image in enumerate(A__ ):
if index >= max_images:
return index
__lowerCamelCase = bytes(A__ , """ascii""" ).decode(
"""unicode-escape""" )
__lowerCamelCase = bytes(A__ , """ascii""" ).decode(
"""unicode-escape""" )
__lowerCamelCase = urllib.request.build_opener()
__lowerCamelCase = [
(
"""User-Agent""",
"""Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"""
""" (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582""",
)
]
urllib.request.install_opener(A__ )
__lowerCamelCase = f'query_{query.replace(" " , "_" )}'
if not os.path.exists(A__ ):
os.makedirs(A__ )
urllib.request.urlretrieve( # noqa: S310
A__ , f'{path_name}/original_size_img_{index}.jpg' )
return index
if __name__ == "__main__":
try:
UpperCAmelCase_ = download_images_from_google_query(sys.argv[1])
print(f"""{image_count} images were downloaded to disk.""")
except IndexError:
print('Please provide a search term.')
raise
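# Illustrative example of the `unicode-escape` decoding step above, which
# turns escape sequences embedded in Google's JSON payload back into plain
# characters in the image URLs:
#   >>> bytes(r"https://example.com/a\u003db", "ascii").decode("unicode-escape")
#   'https://example.com/a=b'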
| 29
|
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def lowerCamelCase__ ( A__ : Dict , A__ : Optional[int]=False ):
'''simple docstring'''
try:
__lowerCamelCase = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
__lowerCamelCase = default
else:
# KEY is set, convert it to True or False.
try:
__lowerCamelCase = strtobool(A__ )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f'If set, {key} must be yes or no.' )
return _value
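# Illustrative usage of the env-flag helper above (wired up below for
# RUN_SLOW): the variable accepts any value strtobool understands, e.g.
#   RUN_SLOW=yes python -m pytest tests/   -> slow tests run
#   RUN_SLOW=0   python -m pytest tests/   -> slow tests are skipped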
UpperCAmelCase_ = parse_flag_from_env('RUN_SLOW', default=False)
def lowerCamelCase__ ( A__ : Any ):
'''simple docstring'''
return unittest.skip("""Test was skipped""" )(A__ )
def lowerCamelCase__ ( A__ : List[Any] ):
'''simple docstring'''
return unittest.skipUnless(_run_slow_tests , """test is slow""" )(A__ )
def lowerCamelCase__ ( A__ : Union[str, Any] ):
'''simple docstring'''
return unittest.skipUnless(not torch.cuda.is_available() , """test requires only a CPU""" )(A__ )
def lowerCamelCase__ ( A__ : List[str] ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.is_available() , """test requires a GPU""" )(A__ )
def lowerCamelCase__ ( A__ : Union[str, Any] ):
'''simple docstring'''
return unittest.skipUnless(is_xpu_available() , """test requires a XPU""" )(A__ )
def lowerCamelCase__ ( A__ : Optional[int] ):
'''simple docstring'''
return unittest.skipUnless(is_mps_available() , """test requires a `mps` backend support in `torch`""" )(A__ )
def lowerCamelCase__ ( A__ : List[Any] ):
'''simple docstring'''
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , """test requires the Hugging Face suite""" )(A__ )
def lowerCamelCase__ ( A__ : Any ):
'''simple docstring'''
return unittest.skipUnless(is_bnb_available() , """test requires the bitsandbytes library""" )(A__ )
def lowerCamelCase__ ( A__ : Optional[int] ):
'''simple docstring'''
return unittest.skipUnless(is_tpu_available() , """test requires TPU""" )(A__ )
def lowerCamelCase__ ( A__ : List[Any] ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() == 1 , """test requires a GPU""" )(A__ )
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() == 1 , """test requires a XPU""" )(A__ )
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() > 1 , """test requires multiple GPUs""" )(A__ )
def lowerCamelCase__ ( A__ : Tuple ):
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() > 1 , """test requires multiple XPUs""" )(A__ )
def lowerCamelCase__ ( A__ : Optional[int] ):
'''simple docstring'''
return unittest.skipUnless(is_safetensors_available() , """test requires safetensors""" )(A__ )
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
return unittest.skipUnless(is_deepspeed_available() , """test requires DeepSpeed""" )(A__ )
def lowerCamelCase__ ( A__ : List[str] ):
'''simple docstring'''
return unittest.skipUnless(is_torch_version(""">=""" , """1.12.0""" ) , """test requires torch version >= 1.12.0""" )(A__ )
def lowerCamelCase__ ( A__ : Tuple=None , A__ : Optional[Any]=None ):
'''simple docstring'''
if test_case is None:
return partial(A__ , version=A__ )
return unittest.skipUnless(is_torch_version(""">=""" , A__ ) , f'test requires torch version >= {version}' )(A__ )
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
return unittest.skipUnless(is_tensorboard_available() , """test requires Tensorboard""" )(A__ )
def lowerCamelCase__ ( A__ : Optional[Any] ):
'''simple docstring'''
return unittest.skipUnless(is_wandb_available() , """test requires wandb""" )(A__ )
def lowerCamelCase__ ( A__ : str ):
'''simple docstring'''
return unittest.skipUnless(is_comet_ml_available() , """test requires comet_ml""" )(A__ )
UpperCAmelCase_ = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def lowerCamelCase__ ( A__ : Any ):
'''simple docstring'''
return unittest.skipUnless(
_atleast_one_tracker_available , """test requires at least one tracker to be available and for `comet_ml` to not be installed""" , )(A__ )
class lowerCamelCase__( unittest.TestCase):
UpperCAmelCase__ : List[Any] = True
@classmethod
def lowerCAmelCase__ ( cls: int ):
__lowerCamelCase = tempfile.mkdtemp()
@classmethod
def lowerCAmelCase__ ( cls: Any ):
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def lowerCAmelCase__ ( self: Any ):
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob("""**/*""" ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(UpperCamelCase_ )
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: int ):
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: Union[mock.Mock, List[mock.Mock]] ):
__lowerCamelCase = mocks if isinstance(UpperCamelCase_ , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def lowerCamelCase__ ( A__ : Optional[Any] ):
'''simple docstring'''
__lowerCamelCase = AcceleratorState()
__lowerCamelCase = tensor[None].clone().to(state.device )
__lowerCamelCase = gather(A__ ).cpu()
__lowerCamelCase = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , A__ ):
return False
return True
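# Illustrative intent of the helper above: each process contributes its
# tensor via gather(), and the check passes only if every rank ended up
# with an identical value -- a typical assertion after a distributed
# synchronization step.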
class lowerCamelCase__:
def __init__( self: Union[str, Any] , UpperCamelCase_: Dict , UpperCamelCase_: Any , UpperCamelCase_: Any ):
__lowerCamelCase = returncode
__lowerCamelCase = stdout
__lowerCamelCase = stderr
async def lowerCamelCase__ ( A__ : int , A__ : Any ):
'''simple docstring'''
while True:
__lowerCamelCase = await stream.readline()
if line:
callback(A__ )
else:
break
async def lowerCamelCase__ ( A__ : Dict , A__ : List[str]=None , A__ : Any=None , A__ : Optional[Any]=None , A__ : Tuple=False , A__ : List[Any]=False ):
'''simple docstring'''
if echo:
print("""\nRunning: """ , """ """.join(A__ ) )
__lowerCamelCase = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=A__ , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=A__ , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
__lowerCamelCase = []
__lowerCamelCase = []
def tee(A__ : int , A__ : Any , A__ : Optional[Any] , A__ : int="" ):
__lowerCamelCase = line.decode("""utf-8""" ).rstrip()
sink.append(A__ )
if not quiet:
print(A__ , A__ , file=A__ )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda A__ : tee(A__ , A__ , sys.stdout , label="""stdout:""" ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda A__ : tee(A__ , A__ , sys.stderr , label="""stderr:""" ) ) ),
] , timeout=A__ , )
return _RunOutput(await p.wait() , A__ , A__ )
def lowerCamelCase__ ( A__ : Optional[Any] , A__ : Any=None , A__ : Union[str, Any]=None , A__ : Dict=180 , A__ : str=False , A__ : List[Any]=True ):
'''simple docstring'''
__lowerCamelCase = asyncio.get_event_loop()
__lowerCamelCase = loop.run_until_complete(
_stream_subprocess(A__ , env=A__ , stdin=A__ , timeout=A__ , quiet=A__ , echo=A__ ) )
__lowerCamelCase = """ """.join(A__ )
if result.returncode > 0:
__lowerCamelCase = """\n""".join(result.stderr )
raise RuntimeError(
f'\'{cmd_str}\' failed with returncode {result.returncode}\n\n'
f'The combined stderr from workers follows:\n{stderr}' )
return result
class lowerCamelCase__( __lowerCamelCase):
pass
def lowerCamelCase__ ( A__ : List[str] , A__ : Union[str, Any]=False ):
'''simple docstring'''
try:
__lowerCamelCase = subprocess.check_output(A__ , stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(A__ , """decode""" ):
__lowerCamelCase = output.decode("""utf-8""" )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f'Command `{" ".join(A__ )}` failed with the following error:\n\n{e.output.decode()}' ) from e
| 29
| 1
|
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'tensor(bool)': np.bool_,
'tensor(int8)': np.int8,
'tensor(uint8)': np.uint8,
'tensor(int16)': np.int16,
'tensor(uint16)': np.uint16,
'tensor(int32)': np.int32,
'tensor(uint32)': np.uint32,
'tensor(int64)': np.int64,
'tensor(uint64)': np.uint64,
'tensor(float16)': np.float16,
'tensor(float)': np.float32,
'tensor(double)': np.float64,
}
class lowerCamelCase__:
def __init__( self: Optional[int] , UpperCamelCase_: Optional[int]=None , **UpperCamelCase_: List[Any] ):
logger.info("""`diffusers.OnnxRuntimeModel` is experimental and might change in the future.""" )
__lowerCamelCase = model
__lowerCamelCase = kwargs.get("""model_save_dir""" , UpperCamelCase_ )
__lowerCamelCase = kwargs.get("""latest_model_name""" , UpperCamelCase_ )
def __call__( self: str , **UpperCamelCase_: Optional[Any] ):
__lowerCamelCase = {k: np.array(UpperCamelCase_ ) for k, v in kwargs.items()}
return self.model.run(UpperCamelCase_ , UpperCamelCase_ )
@staticmethod
def lowerCAmelCase__ ( UpperCamelCase_: Union[str, Path] , UpperCamelCase_: List[str]=None , UpperCamelCase_: int=None ):
if provider is None:
logger.info("""No onnxruntime provider specified, using CPUExecutionProvider""" )
__lowerCamelCase = """CPUExecutionProvider"""
return ort.InferenceSession(UpperCamelCase_ , providers=[provider] , sess_options=UpperCamelCase_ )
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: Union[str, Path] , UpperCamelCase_: Optional[str] = None , **UpperCamelCase_: Any ):
__lowerCamelCase = file_name if file_name is not None else ONNX_WEIGHTS_NAME
__lowerCamelCase = self.model_save_dir.joinpath(self.latest_model_name )
__lowerCamelCase = Path(UpperCamelCase_ ).joinpath(UpperCamelCase_ )
try:
shutil.copyfile(UpperCamelCase_ , UpperCamelCase_ )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
__lowerCamelCase = self.model_save_dir.joinpath(UpperCamelCase_ )
if src_path.exists():
__lowerCamelCase = Path(UpperCamelCase_ ).joinpath(UpperCamelCase_ )
try:
shutil.copyfile(UpperCamelCase_ , UpperCamelCase_ )
except shutil.SameFileError:
pass
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: Union[str, os.PathLike] , **UpperCamelCase_: Union[str, Any] , ):
if os.path.isfile(UpperCamelCase_ ):
logger.error(F'Provided path ({save_directory}) should be a directory, not a file' )
return
os.makedirs(UpperCamelCase_ , exist_ok=UpperCamelCase_ )
# saving model weights/files
self._save_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
@classmethod
def lowerCAmelCase__ ( cls: Optional[int] , UpperCamelCase_: Union[str, Path] , UpperCamelCase_: Optional[Union[bool, str, None]] = None , UpperCamelCase_: Optional[Union[str, None]] = None , UpperCamelCase_: bool = False , UpperCamelCase_: Optional[str] = None , UpperCamelCase_: Optional[str] = None , UpperCamelCase_: Optional[str] = None , UpperCamelCase_: Optional["ort.SessionOptions"] = None , **UpperCamelCase_: List[str] , ):
__lowerCamelCase = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(UpperCamelCase_ ):
__lowerCamelCase = OnnxRuntimeModel.load_model(
os.path.join(UpperCamelCase_ , UpperCamelCase_ ) , provider=UpperCamelCase_ , sess_options=UpperCamelCase_ )
__lowerCamelCase = Path(UpperCamelCase_ )
# load model from hub
else:
# download model
__lowerCamelCase = hf_hub_download(
repo_id=UpperCamelCase_ , filename=UpperCamelCase_ , use_auth_token=UpperCamelCase_ , revision=UpperCamelCase_ , cache_dir=UpperCamelCase_ , force_download=UpperCamelCase_ , )
__lowerCamelCase = Path(UpperCamelCase_ ).parent
__lowerCamelCase = Path(UpperCamelCase_ ).name
__lowerCamelCase = OnnxRuntimeModel.load_model(UpperCamelCase_ , provider=UpperCamelCase_ , sess_options=UpperCamelCase_ )
return cls(model=UpperCamelCase_ , **UpperCamelCase_ )
@classmethod
def lowerCAmelCase__ ( cls: Optional[int] , UpperCamelCase_: Union[str, Path] , UpperCamelCase_: bool = True , UpperCamelCase_: Optional[str] = None , UpperCamelCase_: Optional[str] = None , **UpperCamelCase_: Union[str, Any] , ):
__lowerCamelCase = None
if len(str(UpperCamelCase_ ).split("""@""" ) ) == 2:
__lowerCamelCase, __lowerCamelCase = model_id.split("""@""" )
return cls._from_pretrained(
model_id=UpperCamelCase_ , revision=UpperCamelCase_ , cache_dir=UpperCamelCase_ , force_download=UpperCamelCase_ , use_auth_token=UpperCamelCase_ , **UpperCamelCase_ , )
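# A minimal, self-contained sketch of what this wrapper drives underneath
# ("model.onnx" is a placeholder path):
#   import numpy as np
#   import onnxruntime as ort
#   sess = ort.InferenceSession("model.onnx", providers=["CPUExecutionProvider"])
#   feed = {sess.get_inputs()[0].name: np.zeros((1, 3, 224, 224), np.float32)}
#   outputs = sess.run(None, feed)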
| 29
|
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
UpperCAmelCase_ = datasets.utils.logging.get_logger(__name__)
class lowerCamelCase__( folder_based_builder.FolderBasedBuilderConfig):
UpperCAmelCase__ : bool = None
UpperCAmelCase__ : bool = None
class lowerCamelCase__( folder_based_builder.FolderBasedBuilder):
UpperCAmelCase__ : List[Any] = datasets.Audio()
UpperCAmelCase__ : str = 'audio'
UpperCAmelCase__ : Union[str, Any] = AudioFolderConfig
UpperCAmelCase__ : List[str] # definition at the bottom of the script
UpperCAmelCase__ : Optional[int] = AudioClassification(audio_column='audio' , label_column='label')
UpperCAmelCase_ = [
'.aiff',
'.au',
'.avr',
'.caf',
'.flac',
'.htk',
'.svx',
'.mat4',
'.mat5',
'.mpc2k',
'.ogg',
'.paf',
'.pvf',
'.raw',
'.rf64',
'.sd2',
'.sds',
'.ircam',
'.voc',
'.w64',
'.wav',
'.nist',
'.wavex',
'.wve',
'.xi',
'.mp3',
'.opus',
]
UpperCAmelCase_ = AUDIO_EXTENSIONS
| 29
| 1
|
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of the same set.
def lowerCamelCase__ ( A__ : Tuple ):
'''simple docstring'''
__lowerCamelCase = [False] * len(A__ )
__lowerCamelCase = [-1] * len(A__ )
def dfs(A__ : Optional[int] , A__ : Optional[int] ):
__lowerCamelCase = True
__lowerCamelCase = c
for u in graph[v]:
if not visited[u]:
dfs(A__ , 1 - c )
for i in range(len(A__ ) ):
if not visited[i]:
dfs(A__ , 0 )
for i in range(len(A__ ) ):
for j in graph[i]:
if color[i] == color[j]:
return False
return True
# Adjacency list of graph
UpperCAmelCase_ = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
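# Illustrative counter-example: a triangle such as
# {0: [1, 2], 1: [0, 2], 2: [0, 1]} is an odd cycle and cannot be
# 2-colored, so the check above returns False for it, while the even
# 4-cycle defined above returns True.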
| 29
|
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'nvidia/segformer-b0-finetuned-ade-512-512': (
'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : str = 'segformer'
def __init__( self: Union[str, Any] , UpperCamelCase_: Optional[int]=3 , UpperCamelCase_: Any=4 , UpperCamelCase_: int=[2, 2, 2, 2] , UpperCamelCase_: Optional[Any]=[8, 4, 2, 1] , UpperCamelCase_: Union[str, Any]=[32, 64, 1_60, 2_56] , UpperCamelCase_: int=[7, 3, 3, 3] , UpperCamelCase_: Dict=[4, 2, 2, 2] , UpperCamelCase_: str=[1, 2, 5, 8] , UpperCamelCase_: List[str]=[4, 4, 4, 4] , UpperCamelCase_: Optional[int]="gelu" , UpperCamelCase_: List[Any]=0.0 , UpperCamelCase_: List[Any]=0.0 , UpperCamelCase_: Tuple=0.1 , UpperCamelCase_: Optional[int]=0.02 , UpperCamelCase_: List[Any]=0.1 , UpperCamelCase_: Optional[int]=1E-6 , UpperCamelCase_: Optional[int]=2_56 , UpperCamelCase_: Optional[Any]=2_55 , **UpperCamelCase_: List[Any] , ):
super().__init__(**UpperCamelCase_ )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
"""Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"""
""" removed, as the behaviour will default to that of reshape_last_stage = True.""" , UpperCamelCase_ , )
__lowerCamelCase = num_channels
__lowerCamelCase = num_encoder_blocks
__lowerCamelCase = depths
__lowerCamelCase = sr_ratios
__lowerCamelCase = hidden_sizes
__lowerCamelCase = patch_sizes
__lowerCamelCase = strides
__lowerCamelCase = mlp_ratios
__lowerCamelCase = num_attention_heads
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = classifier_dropout_prob
__lowerCamelCase = initializer_range
__lowerCamelCase = drop_path_rate
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = decoder_hidden_size
__lowerCamelCase = kwargs.get("""reshape_last_stage""" , UpperCamelCase_ )
__lowerCamelCase = semantic_loss_ignore_index
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Any = version.parse('1.11')
@property
def lowerCAmelCase__ ( self: Any ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCAmelCase__ ( self: Union[str, Any] ):
return 1E-4
@property
def lowerCAmelCase__ ( self: Dict ):
return 12
| 29
| 1
|
import requests
from bs4 import BeautifulSoup
def lowerCamelCase__ ( A__ : str = "https://www.worldometers.info/coronavirus" ):
'''simple docstring'''
__lowerCamelCase = BeautifulSoup(requests.get(A__ ).text , """html.parser""" )
__lowerCamelCase = soup.findAll("""h1""" )
__lowerCamelCase = soup.findAll("""div""" , {"""class""": """maincounter-number"""} )
keys += soup.findAll("""span""" , {"""class""": """panel-title"""} )
values += soup.findAll("""div""" , {"""class""": """number-table-main"""} )
return {key.text.strip(): value.text.strip() for key, value in zip(A__ , A__ )}
if __name__ == "__main__":
print('\033[1m' + 'COVID-19 Status of the World' + '\033[0m\n')
for key, value in world_covidaa_stats().items():
print(f"""{key}\n{value}\n""")
| 29
|
import string
import numpy
def lowerCamelCase__ ( A__ : int , A__ : int ):
'''simple docstring'''
return b if a == 0 else greatest_common_divisor(b % a , A__ )
class lowerCamelCase__:
UpperCAmelCase__ : Optional[int] = string.ascii_uppercase + string.digits
# This cipher takes alphanumerics into account
# i.e. a total of 36 characters
# take x and return x % len(key_string)
UpperCAmelCase__ : Optional[int] = numpy.vectorize(lambda x: x % 36)
UpperCAmelCase__ : List[Any] = numpy.vectorize(__lowerCamelCase)
def __init__( self: List[Any] , UpperCamelCase_: numpy.ndarray ):
__lowerCamelCase = self.modulus(UpperCamelCase_ ) # mod 36 calculation on the encryption key
self.check_determinant() # validate the determinant of the encryption key
__lowerCamelCase = encrypt_key.shape[0]
def lowerCAmelCase__ ( self: str , UpperCamelCase_: str ):
return self.key_string.index(UpperCamelCase_ )
def lowerCAmelCase__ ( self: str , UpperCamelCase_: int ):
return self.key_string[round(UpperCamelCase_ )]
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
__lowerCamelCase = det % len(self.key_string )
__lowerCamelCase = len(self.key_string )
if greatest_common_divisor(UpperCamelCase_ , len(self.key_string ) ) != 1:
__lowerCamelCase = (
F'determinant modular {req_l} of encryption key({det}) '
F'is not coprime w.r.t. {req_l}.\nTry another key.'
)
raise ValueError(UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: str ):
__lowerCamelCase = [char for char in text.upper() if char in self.key_string]
__lowerCamelCase = chars[-1]
while len(UpperCamelCase_ ) % self.break_key != 0:
chars.append(UpperCamelCase_ )
return "".join(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: str ):
__lowerCamelCase = self.process_text(text.upper() )
__lowerCamelCase = """"""
for i in range(0 , len(UpperCamelCase_ ) - self.break_key + 1 , self.break_key ):
__lowerCamelCase = text[i : i + self.break_key]
__lowerCamelCase = [self.replace_letters(UpperCamelCase_ ) for char in batch]
__lowerCamelCase = numpy.array([vec] ).T
__lowerCamelCase = self.modulus(self.encrypt_key.dot(UpperCamelCase_ ) ).T.tolist()[
0
]
__lowerCamelCase = """""".join(
self.replace_digits(UpperCamelCase_ ) for num in batch_encrypted )
encrypted += encrypted_batch
return encrypted
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
__lowerCamelCase = det % len(self.key_string )
__lowerCamelCase = None
for i in range(len(self.key_string ) ):
if (det * i) % len(self.key_string ) == 1:
__lowerCamelCase = i
break
__lowerCamelCase = (
det_inv
* numpy.linalg.det(self.encrypt_key )
* numpy.linalg.inv(self.encrypt_key )
)
return self.to_int(self.modulus(UpperCamelCase_ ) )
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: str ):
__lowerCamelCase = self.make_decrypt_key()
__lowerCamelCase = self.process_text(text.upper() )
__lowerCamelCase = """"""
for i in range(0 , len(UpperCamelCase_ ) - self.break_key + 1 , self.break_key ):
__lowerCamelCase = text[i : i + self.break_key]
__lowerCamelCase = [self.replace_letters(UpperCamelCase_ ) for char in batch]
__lowerCamelCase = numpy.array([vec] ).T
__lowerCamelCase = self.modulus(decrypt_key.dot(UpperCamelCase_ ) ).T.tolist()[0]
__lowerCamelCase = """""".join(
self.replace_digits(UpperCamelCase_ ) for num in batch_decrypted )
decrypted += decrypted_batch
return decrypted
def lowerCamelCase__ ( ):
'''simple docstring'''
__lowerCamelCase = int(input("""Enter the order of the encryption key: """ ) )
__lowerCamelCase = []
print("""Enter each row of the encryption key with space separated integers""" )
for _ in range(A__ ):
__lowerCamelCase = [int(A__ ) for x in input().split()]
hill_matrix.append(A__ )
__lowerCamelCase = HillCipher(numpy.array(A__ ) )
print("""Would you like to encrypt or decrypt some text? (1 or 2)""" )
__lowerCamelCase = input("""\n1. Encrypt\n2. Decrypt\n""" )
if option == "1":
__lowerCamelCase = input("""What text would you like to encrypt?: """ )
print("""Your encrypted text is:""" )
print(hc.encrypt(A__ ) )
elif option == "2":
__lowerCamelCase = input("""What text would you like to decrypt?: """ )
print("""Your decrypted text is:""" )
print(hc.decrypt(A__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
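# Illustrative worked example of the decryption-key math above, for the
# 2x2 key K = [[2, 5], [1, 6]] over the 36-character alphabet:
#   det(K) = 7, and 7 * 31 = 217 = 6*36 + 1, so det_inv = 31 (mod 36);
#   K^-1 = 31 * adj(K) = 31 * [[6, -5], [-1, 2]] = [[6, 25], [5, 26]] (mod 36),
# and indeed K @ K^-1 = [[37, 180], [36, 181]] = identity (mod 36).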
| 29
| 1
|
from __future__ import annotations
def lowerCamelCase__ ( A__ : float , A__ : float , A__ : float , ):
'''simple docstring'''
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif electron_conc < 0:
raise ValueError("""Electron concentration cannot be negative in a semiconductor""" )
elif hole_conc < 0:
raise ValueError("""Hole concentration cannot be negative in a semiconductor""" )
elif intrinsic_conc < 0:
raise ValueError(
"""Intrinsic concentration cannot be negative in a semiconductor""" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
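# Illustrative numeric check of the mass action law used above,
# n * p = n_i**2: with n_i = 1.5e10 and p = 1e16 carriers per cm^3,
# the electron concentration is n = n_i**2 / p = 2.25e20 / 1e16 = 2.25e4.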
| 29
|
import qiskit
def lowerCamelCase__ ( A__ : int , A__ : int ):
'''simple docstring'''
__lowerCamelCase = qiskit.Aer.get_backend("""aer_simulator""" )
__lowerCamelCase = qiskit.QuantumCircuit(4 , 2 )
# encode inputs in qubits 0 and 1
if bita == 1:
qc_ha.x(0 )
if bita == 1:
qc_ha.x(1 )
qc_ha.barrier()
# use cnots to write XOR of the inputs on qubit2
qc_ha.cx(0 , 2 )
qc_ha.cx(1 , 2 )
# use ccx / toffoli gate to write AND of the inputs on qubit3
qc_ha.ccx(0 , 1 , 3 )
qc_ha.barrier()
# extract outputs
qc_ha.measure(2 , 0 ) # extract XOR value
qc_ha.measure(3 , 1 ) # extract AND value
# Execute the circuit on the qasm simulator
__lowerCamelCase = qiskit.execute(A__ , A__ , shots=1000 )
# Return the histogram data of the results of the experiment
return job.result().get_counts(A__ )
if __name__ == "__main__":
UpperCAmelCase_ = half_adder(1, 1)
print(f"""Half Adder Output Qubit Counts: {counts}""")
| 29
| 1
|
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def lowerCamelCase__ ( A__ : Optional[int] , A__ : Union[str, Any]=10 ):
'''simple docstring'''
__lowerCamelCase = []
for _ in range(A__ ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def lowerCamelCase__ ( A__ : Dict , A__ : Dict=10 ):
'''simple docstring'''
__lowerCamelCase = []
for step in range(A__ ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCamelCase = os.path.join(A__ , """schedule.bin""" )
torch.save(scheduler.state_dict() , A__ )
__lowerCamelCase = torch.load(A__ )
scheduler.load_state_dict(A__ )
return lrs
@require_torch
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: List[str] , UpperCamelCase_: Tuple ):
self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) )
for a, b in zip(UpperCamelCase_ , UpperCamelCase_ ):
self.assertAlmostEqual(UpperCamelCase_ , UpperCamelCase_ , delta=UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCamelCase_ )
__lowerCamelCase = torch.tensor([0.4, 0.2, -0.5] )
__lowerCamelCase = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
__lowerCamelCase = AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 )
for _ in range(1_00 ):
__lowerCamelCase = criterion(UpperCamelCase_ , UpperCamelCase_ )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. We do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCamelCase_ )
__lowerCamelCase = torch.tensor([0.4, 0.2, -0.5] )
__lowerCamelCase = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
__lowerCamelCase = Adafactor(
params=[w] , lr=1E-2 , eps=(1E-30, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=UpperCamelCase_ , weight_decay=0.0 , relative_step=UpperCamelCase_ , scale_parameter=UpperCamelCase_ , warmup_init=UpperCamelCase_ , )
for _ in range(10_00 ):
__lowerCamelCase = criterion(UpperCamelCase_ , UpperCamelCase_ )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. We do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
@require_torch
class lowerCamelCase__( unittest.TestCase):
UpperCAmelCase__ : Dict = nn.Linear(50 , 50) if is_torch_available() else None
UpperCAmelCase__ : int = AdamW(m.parameters() , lr=10.0) if is_torch_available() else None
UpperCAmelCase__ : List[str] = 10
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: str , UpperCamelCase_: int , UpperCamelCase_: Optional[int] , UpperCamelCase_: List[Any]=None ):
self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) )
for a, b in zip(UpperCamelCase_ , UpperCamelCase_ ):
self.assertAlmostEqual(UpperCamelCase_ , UpperCamelCase_ , delta=UpperCamelCase_ , msg=UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = {"""num_warmup_steps""": 2, """num_training_steps""": 10}
# scheduler dict format
# function: (sched_args_dict, expected_learning_rates)
__lowerCamelCase = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"""num_warmup_steps""": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, """num_cycles""": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, """power""": 2.0, """lr_end""": 1E-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"""num_warmup_steps""": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
for scheduler_func, data in scheds.items():
__lowerCamelCase, __lowerCamelCase = data
__lowerCamelCase = scheduler_func(self.optimizer , **UpperCamelCase_ )
self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
__lowerCamelCase = unwrap_schedule(UpperCamelCase_ , self.num_steps )
self.assertListAlmostEqual(
UpperCamelCase_ , UpperCamelCase_ , tol=1E-2 , msg=F'failed for {scheduler_func} in normal scheduler' , )
__lowerCamelCase = scheduler_func(self.optimizer , **UpperCamelCase_ )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(UpperCamelCase_ ) # wrap to test picklability of the schedule
__lowerCamelCase = unwrap_and_save_reload_schedule(UpperCamelCase_ , self.num_steps )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ , msg=F'failed for {scheduler_func} in save and reload' )
class lowerCamelCase__:
def __init__( self: Dict , UpperCamelCase_: Optional[Any] ):
__lowerCamelCase = fn
def __call__( self: str , *UpperCamelCase_: Any , **UpperCamelCase_: Any ):
return self.fn(*UpperCamelCase_ , **UpperCamelCase_ )
@classmethod
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: Optional[Any] ):
__lowerCamelCase = list(map(self , scheduler.lr_lambdas ) )
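# A minimal sketch of the warmup-then-linear-decay multiplier tested above
# (base lr 10.0, num_warmup_steps=2, num_training_steps=10): the factor
# ramps 0 -> 1 over the warmup steps, then decays linearly to 0.
def _linear_with_warmup(step: int, warmup: int = 2, total: int = 10) -> float:
    if step < warmup:
        return step / max(1, warmup)
    return max(0.0, (total - step) / max(1, total - warmup))
# _linear_with_warmup(1) == 0.5 -> lr 5.0; _linear_with_warmup(3) == 0.875 -> lr 8.75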
| 29
|
def lowerCamelCase__ ( A__ : int ):
'''simple docstring'''
__lowerCamelCase = [[0 for _ in range(A__ )] for _ in range(m + 1 )]
for i in range(m + 1 ):
__lowerCamelCase = 1
for n in range(m + 1 ):
for k in range(1 , A__ ):
memo[n][k] += memo[n][k - 1]
if n - k > 0:
memo[n][k] += memo[n - k - 1][k]
return memo[m][m - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
UpperCAmelCase_ = int(input('Enter a number: ').strip())
print(partition(n))
except ValueError:
print('Please enter a number.')
else:
try:
UpperCAmelCase_ = int(sys.argv[1])
print(partition(n))
except ValueError:
print('Please pass a number.')
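# Illustrative expected values, assuming the function above is exposed as
# `partition` (the name the __main__ block uses): it computes the number of
# integer partitions p(n), so p(1)..p(6) = 1, 2, 3, 5, 7, 11 and p(10) = 42.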
| 29
| 1
|
def lowerCamelCase__ ( A__ : int ):
'''simple docstring'''
__lowerCamelCase = [[0 for _ in range(A__ )] for _ in range(m + 1 )]
for i in range(m + 1 ):
__lowerCamelCase = 1
for n in range(m + 1 ):
for k in range(1 , A__ ):
memo[n][k] += memo[n][k - 1]
if n - k > 0:
memo[n][k] += memo[n - k - 1][k]
return memo[m][m - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
UpperCAmelCase_ = int(input('Enter a number: ').strip())
print(partition(n))
except ValueError:
print('Please enter a number.')
else:
try:
UpperCAmelCase_ = int(sys.argv[1])
print(partition(n))
except ValueError:
print('Please pass a number.')
| 29
|
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
UpperCAmelCase_ = logging.get_logger(__name__)
@add_end_docstrings(__lowerCamelCase)
class lowerCamelCase__( __lowerCamelCase):
def __init__( self: Tuple , *UpperCamelCase_: Dict , **UpperCamelCase_: Optional[int] ):
super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
requires_backends(self , """decord""" )
self.check_model_type(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: int=None , UpperCamelCase_: List[Any]=None , UpperCamelCase_: Optional[int]=None ):
__lowerCamelCase = {}
if frame_sampling_rate is not None:
__lowerCamelCase = frame_sampling_rate
if num_frames is not None:
__lowerCamelCase = num_frames
__lowerCamelCase = {}
if top_k is not None:
__lowerCamelCase = top_k
return preprocess_params, {}, postprocess_params
def __call__( self: Any , UpperCamelCase_: Union[str, List[str]] , **UpperCamelCase_: str ):
return super().__call__(UpperCamelCase_ , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: List[str]=None , UpperCamelCase_: List[Any]=1 ):
if num_frames is None:
__lowerCamelCase = self.model.config.num_frames
if video.startswith("""http://""" ) or video.startswith("""https://""" ):
__lowerCamelCase = BytesIO(requests.get(UpperCamelCase_ ).content )
__lowerCamelCase = VideoReader(UpperCamelCase_ )
videoreader.seek(0 )
__lowerCamelCase = 0
__lowerCamelCase = num_frames * frame_sampling_rate - 1
__lowerCamelCase = np.linspace(UpperCamelCase_ , UpperCamelCase_ , num=UpperCamelCase_ , dtype=np.int64 )
__lowerCamelCase = videoreader.get_batch(UpperCamelCase_ ).asnumpy()
__lowerCamelCase = list(UpperCamelCase_ )
__lowerCamelCase = self.image_processor(UpperCamelCase_ , return_tensors=self.framework )
return model_inputs
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: Any ):
__lowerCamelCase = self.model(**UpperCamelCase_ )
return model_outputs
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: List[Any] , UpperCamelCase_: Optional[int]=5 ):
if top_k > self.model.config.num_labels:
__lowerCamelCase = self.model.config.num_labels
if self.framework == "pt":
__lowerCamelCase = model_outputs.logits.softmax(-1 )[0]
__lowerCamelCase, __lowerCamelCase = probs.topk(UpperCamelCase_ )
else:
raise ValueError(F'Unsupported framework: {self.framework}' )
__lowerCamelCase = scores.tolist()
__lowerCamelCase = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(UpperCamelCase_ , UpperCamelCase_ )]
| 29
| 1
|
UpperCAmelCase_ = {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []}
UpperCAmelCase_ = ['a', 'b', 'c', 'd', 'e']
def lowerCamelCase__ ( A__ : Union[str, Any] , A__ : Optional[int] , A__ : str ):
'''simple docstring'''
__lowerCamelCase = start
# add current to visited
visited.append(A__ )
__lowerCamelCase = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
__lowerCamelCase = topological_sort(A__ , A__ , A__ )
# if all neighbors visited add current to sort
sort.append(A__ )
# if all vertices haven't been visited select a new one to visit
if len(A__ ) != len(A__ ):
for vertice in vertices:
if vertice not in visited:
__lowerCamelCase = topological_sort(A__ , A__ , A__ )
# return sort
return sort
if __name__ == "__main__":
UpperCAmelCase_ = topological_sort('a', [], [])
print(sort)
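# Illustrative trace for the graph above: a vertex is appended only after
# all of its unvisited neighbors, so the printed order is
# ['c', 'd', 'e', 'b', 'a'] -- reverse it if each vertex must precede its
# neighbors.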
| 29
|
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
UpperCAmelCase_ = logging.get_logger(__name__)
@add_end_docstrings(__lowerCamelCase)
class lowerCamelCase__( __lowerCamelCase):
def __init__( self: List[Any] , *UpperCamelCase_: Dict , **UpperCamelCase_: Dict ):
super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
self.check_model_type(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: str=None , UpperCamelCase_: Optional[int]=None , UpperCamelCase_: Optional[int]=None , **UpperCamelCase_: List[Any] ):
__lowerCamelCase, __lowerCamelCase = {}, {}
if padding is not None:
__lowerCamelCase = padding
if truncation is not None:
__lowerCamelCase = truncation
if top_k is not None:
__lowerCamelCase = top_k
return preprocess_params, {}, postprocess_params
def __call__( self: Optional[Any] , UpperCamelCase_: Union["Image.Image", str] , UpperCamelCase_: str = None , **UpperCamelCase_: List[str] ):
if isinstance(UpperCamelCase_ , (Image.Image, str) ) and isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = {"""image""": image, """question""": question}
else:
__lowerCamelCase = image
__lowerCamelCase = super().__call__(UpperCamelCase_ , **UpperCamelCase_ )
return results
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: str , UpperCamelCase_: Any=False , UpperCamelCase_: Optional[int]=False ):
__lowerCamelCase = load_image(inputs["""image"""] )
__lowerCamelCase = self.tokenizer(
inputs["""question"""] , return_tensors=self.framework , padding=UpperCamelCase_ , truncation=UpperCamelCase_ )
__lowerCamelCase = self.image_processor(images=UpperCamelCase_ , return_tensors=self.framework )
model_inputs.update(UpperCamelCase_ )
return model_inputs
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: Tuple ):
__lowerCamelCase = self.model(**UpperCamelCase_ )
return model_outputs
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: Optional[int] , UpperCamelCase_: Optional[Any]=5 ):
if top_k > self.model.config.num_labels:
__lowerCamelCase = self.model.config.num_labels
if self.framework == "pt":
__lowerCamelCase = model_outputs.logits.sigmoid()[0]
__lowerCamelCase, __lowerCamelCase = probs.topk(UpperCamelCase_ )
else:
raise ValueError(F'Unsupported framework: {self.framework}' )
__lowerCamelCase = scores.tolist()
__lowerCamelCase = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(UpperCamelCase_ , UpperCamelCase_ )]
| 29
| 1
|
UpperCAmelCase_ = {
0: '0',
1: '1',
2: '2',
3: '3',
4: '4',
5: '5',
6: '6',
7: '7',
8: '8',
9: '9',
10: 'a',
11: 'b',
12: 'c',
13: 'd',
14: 'e',
15: 'f',
}
def lowerCamelCase__ ( A__ : float ):
'''simple docstring'''
assert type(A__ ) in (int, float) and decimal == int(A__ )
__lowerCamelCase = int(A__ )
__lowerCamelCase = """"""
__lowerCamelCase = False
if decimal < 0:
__lowerCamelCase = True
decimal *= -1
while decimal > 0:
__lowerCamelCase, __lowerCamelCase = divmod(A__ , 16 )
__lowerCamelCase = values[remainder] + hexadecimal
__lowerCamelCase = """0x""" + hexadecimal
if negative:
__lowerCamelCase = """-""" + hexadecimal
return hexadecimal
if __name__ == "__main__":
import doctest
doctest.testmod()
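# Illustrative worked examples for the converter above (call it
# decimal_to_hexadecimal): repeated divmod by 16 emits hex digits
# least-significant first, so
#   5 -> '0x5',  26 -> '0x1a',  255 -> '0xff',  -42 -> '-0x2a'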
| 29
|
UpperCAmelCase_ = {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []}
UpperCAmelCase_ = ['a', 'b', 'c', 'd', 'e']
def lowerCamelCase__ ( A__ : Union[str, Any] , A__ : Optional[int] , A__ : str ):
'''simple docstring'''
__lowerCamelCase = start
# add current to visited
visited.append(A__ )
__lowerCamelCase = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
__lowerCamelCase = topological_sort(A__ , A__ , A__ )
# if all neighbors visited add current to sort
sort.append(A__ )
# if all vertices haven't been visited select a new one to visit
if len(A__ ) != len(A__ ):
for vertice in vertices:
if vertice not in visited:
__lowerCamelCase = topological_sort(A__ , A__ , A__ )
# return sort
return sort
if __name__ == "__main__":
UpperCAmelCase_ = topological_sort('a', [], [])
print(sort)
| 29
| 1
|
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'nvidia/segformer-b0-finetuned-ade-512-512': (
'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : str = 'segformer'
def __init__( self: Union[str, Any] , UpperCamelCase_: Optional[int]=3 , UpperCamelCase_: Any=4 , UpperCamelCase_: int=[2, 2, 2, 2] , UpperCamelCase_: Optional[Any]=[8, 4, 2, 1] , UpperCamelCase_: Union[str, Any]=[32, 64, 1_60, 2_56] , UpperCamelCase_: int=[7, 3, 3, 3] , UpperCamelCase_: Dict=[4, 2, 2, 2] , UpperCamelCase_: str=[1, 2, 5, 8] , UpperCamelCase_: List[str]=[4, 4, 4, 4] , UpperCamelCase_: Optional[int]="gelu" , UpperCamelCase_: List[Any]=0.0 , UpperCamelCase_: List[Any]=0.0 , UpperCamelCase_: Tuple=0.1 , UpperCamelCase_: Optional[int]=0.02 , UpperCamelCase_: List[Any]=0.1 , UpperCamelCase_: Optional[int]=1E-6 , UpperCamelCase_: Optional[int]=2_56 , UpperCamelCase_: Optional[Any]=2_55 , **UpperCamelCase_: List[Any] , ):
super().__init__(**UpperCamelCase_ )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
"""Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"""
""" removed, as the behaviour will default to that of reshape_last_stage = True.""" , UpperCamelCase_ , )
__lowerCamelCase = num_channels
__lowerCamelCase = num_encoder_blocks
__lowerCamelCase = depths
__lowerCamelCase = sr_ratios
__lowerCamelCase = hidden_sizes
__lowerCamelCase = patch_sizes
__lowerCamelCase = strides
__lowerCamelCase = mlp_ratios
__lowerCamelCase = num_attention_heads
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = classifier_dropout_prob
__lowerCamelCase = initializer_range
__lowerCamelCase = drop_path_rate
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = decoder_hidden_size
__lowerCamelCase = kwargs.get("""reshape_last_stage""" , UpperCamelCase_ )
__lowerCamelCase = semantic_loss_ignore_index
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Any = version.parse('1.11')
@property
def lowerCAmelCase__ ( self: Any ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCAmelCase__ ( self: Union[str, Any] ):
return 1E-4
@property
def lowerCAmelCase__ ( self: Dict ):
return 12
| 29
|
import requests
from bsa import BeautifulSoup
def lowerCamelCase__ ( A__ : str = "https://www.worldometers.info/coronavirus" ):
'''simple docstring'''
__lowerCamelCase = BeautifulSoup(requests.get(A__ ).text , """html.parser""" )
__lowerCamelCase = soup.findAll("""h1""" )
__lowerCamelCase = soup.findAll("""div""" , {"""class""": """maincounter-number"""} )
keys += soup.findAll("""span""" , {"""class""": """panel-title"""} )
values += soup.findAll("""div""" , {"""class""": """number-table-main"""} )
return {key.text.strip(): value.text.strip() for key, value in zip(A__ , A__ )}
if __name__ == "__main__":
print('\033[1m' + 'COVID-19 Status of the World' + '\033[0m\n')
for key, value in world_covidaa_stats().items():
print(f"""{key}\n{value}\n""")
| 29
| 1
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
UpperCAmelCase_ = logging.get_logger(__name__)
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : int = ['pixel_values']
def __init__( self: Dict , UpperCamelCase_: bool = True , UpperCamelCase_: Dict[str, int] = None , UpperCamelCase_: PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase_: bool = True , UpperCamelCase_: Union[int, float] = 1 / 2_55 , UpperCamelCase_: bool = True , UpperCamelCase_: Optional[Union[float, List[float]]] = None , UpperCamelCase_: Optional[Union[float, List[float]]] = None , UpperCamelCase_: bool = True , **UpperCamelCase_: Any , ):
super().__init__(**UpperCamelCase_ )
__lowerCamelCase = size if size is not None else {"""height""": 3_84, """width""": 3_84}
__lowerCamelCase = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ )
__lowerCamelCase = do_resize
__lowerCamelCase = size
__lowerCamelCase = resample
__lowerCamelCase = do_rescale
__lowerCamelCase = rescale_factor
__lowerCamelCase = do_normalize
__lowerCamelCase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
__lowerCamelCase = image_std if image_std is not None else OPENAI_CLIP_STD
__lowerCamelCase = do_convert_rgb
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: np.ndarray , UpperCamelCase_: Dict[str, int] , UpperCamelCase_: PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase_: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_: Optional[int] , ):
__lowerCamelCase = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}' )
__lowerCamelCase = (size["""height"""], size["""width"""])
return resize(UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: np.ndarray , UpperCamelCase_: Union[int, float] , UpperCamelCase_: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_: Dict , ):
return rescale(UpperCamelCase_ , scale=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: np.ndarray , UpperCamelCase_: Union[float, List[float]] , UpperCamelCase_: Union[float, List[float]] , UpperCamelCase_: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_: Optional[Any] , ):
return normalize(UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: ImageInput , UpperCamelCase_: Optional[bool] = None , UpperCamelCase_: Optional[Dict[str, int]] = None , UpperCamelCase_: PILImageResampling = None , UpperCamelCase_: Optional[bool] = None , UpperCamelCase_: Optional[float] = None , UpperCamelCase_: Optional[bool] = None , UpperCamelCase_: Optional[Union[float, List[float]]] = None , UpperCamelCase_: Optional[Union[float, List[float]]] = None , UpperCamelCase_: Optional[Union[str, TensorType]] = None , UpperCamelCase_: bool = None , UpperCamelCase_: ChannelDimension = ChannelDimension.FIRST , **UpperCamelCase_: Any , ):
__lowerCamelCase = do_resize if do_resize is not None else self.do_resize
__lowerCamelCase = resample if resample is not None else self.resample
__lowerCamelCase = do_rescale if do_rescale is not None else self.do_rescale
__lowerCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowerCamelCase = do_normalize if do_normalize is not None else self.do_normalize
__lowerCamelCase = image_mean if image_mean is not None else self.image_mean
__lowerCamelCase = image_std if image_std is not None else self.image_std
__lowerCamelCase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__lowerCamelCase = size if size is not None else self.size
__lowerCamelCase = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ )
__lowerCamelCase = make_list_of_images(UpperCamelCase_ )
if not valid_images(UpperCamelCase_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__lowerCamelCase = [convert_to_rgb(UpperCamelCase_ ) for image in images]
# All transformations expect numpy arrays.
__lowerCamelCase = [to_numpy_array(UpperCamelCase_ ) for image in images]
if do_resize:
__lowerCamelCase = [self.resize(image=UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ ) for image in images]
if do_rescale:
__lowerCamelCase = [self.rescale(image=UpperCamelCase_ , scale=UpperCamelCase_ ) for image in images]
if do_normalize:
__lowerCamelCase = [self.normalize(image=UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ ) for image in images]
__lowerCamelCase = [to_channel_dimension_format(UpperCamelCase_ , UpperCamelCase_ ) for image in images]
__lowerCamelCase = BatchFeature(data={"""pixel_values""": images} , tensor_type=UpperCamelCase_ )
return encoded_outputs
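# Minimal numpy sketch (illustrative; the mean/std values are CLIP's published
# normalization constants, assumed here) of the rescale -> normalize ->
# channels-first pipeline the processor above applies. Resizing is skipped
# because the dummy image is already at the 384x384 target size.
import numpy as np
_img = np.random.randint(0, 2_56, (3_84, 3_84, 3)).astype(np.float32)
_img = _img * (1 / 2_55)                                # rescale_factor = 1/255
_mean = np.array([0.48145466, 0.4578275, 0.40821073])   # OPENAI_CLIP_MEAN
_std = np.array([0.26862954, 0.26130258, 0.27577711])   # OPENAI_CLIP_STD
_img = (_img - _mean) / _std                            # per-channel normalize
_img = _img.transpose(2, 0, 1)                          # ChannelDimension.FIRST
assert _img.shape == (3, 3_84, 3_84)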
| 29
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Union[str, Any] = 'yolos'
def __init__( self: Dict , UpperCamelCase_: List[Any]=7_68 , UpperCamelCase_: Tuple=12 , UpperCamelCase_: int=12 , UpperCamelCase_: int=30_72 , UpperCamelCase_: List[str]="gelu" , UpperCamelCase_: Union[str, Any]=0.0 , UpperCamelCase_: int=0.0 , UpperCamelCase_: Optional[int]=0.02 , UpperCamelCase_: Dict=1E-12 , UpperCamelCase_: List[Any]=[5_12, 8_64] , UpperCamelCase_: Optional[int]=16 , UpperCamelCase_: Any=3 , UpperCamelCase_: Union[str, Any]=True , UpperCamelCase_: List[str]=1_00 , UpperCamelCase_: List[str]=True , UpperCamelCase_: Any=False , UpperCamelCase_: Optional[Any]=1 , UpperCamelCase_: Any=5 , UpperCamelCase_: Any=2 , UpperCamelCase_: Tuple=5 , UpperCamelCase_: str=2 , UpperCamelCase_: Any=0.1 , **UpperCamelCase_: Any , ):
super().__init__(**UpperCamelCase_ )
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = initializer_range
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = image_size
__lowerCamelCase = patch_size
__lowerCamelCase = num_channels
__lowerCamelCase = qkv_bias
__lowerCamelCase = num_detection_tokens
__lowerCamelCase = use_mid_position_embeddings
__lowerCamelCase = auxiliary_loss
# Hungarian matcher
__lowerCamelCase = class_cost
__lowerCamelCase = bbox_cost
__lowerCamelCase = giou_cost
# Loss coefficients
__lowerCamelCase = bbox_loss_coefficient
__lowerCamelCase = giou_loss_coefficient
__lowerCamelCase = eos_coefficient
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Tuple = version.parse('1.11')
@property
def lowerCAmelCase__ ( self: Any ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCAmelCase__ ( self: Dict ):
return 1E-4
@property
def lowerCAmelCase__ ( self: Dict ):
return 12
| 29
| 1
|
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
UpperCAmelCase_ = datasets.utils.logging.get_logger(__name__)
class lowerCamelCase__( folder_based_builder.FolderBasedBuilderConfig):
UpperCAmelCase__ : bool = None
UpperCAmelCase__ : bool = None
class lowerCamelCase__( folder_based_builder.FolderBasedBuilder):
UpperCAmelCase__ : List[Any] = datasets.Audio()
UpperCAmelCase__ : str = 'audio'
UpperCAmelCase__ : Union[str, Any] = AudioFolderConfig
UpperCAmelCase__ : List[str] # definition at the bottom of the script
UpperCAmelCase__ : Optional[int] = AudioClassification(audio_column='audio' , label_column='label')
UpperCAmelCase_ = [
'.aiff',
'.au',
'.avr',
'.caf',
'.flac',
'.htk',
'.svx',
'.mat4',
'.mat5',
'.mpc2k',
'.ogg',
'.paf',
'.pvf',
'.raw',
'.rf64',
'.sd2',
'.sds',
'.ircam',
'.voc',
'.w64',
'.wav',
'.nist',
'.wavex',
'.wve',
'.xi',
'.mp3',
'.opus',
]
UpperCAmelCase_ = AUDIO_EXTENSIONS
| 29
|
import os
from math import logaa
def lowerCamelCase__ ( A__ : str = "base_exp.txt" ):
'''simple docstring'''
__lowerCamelCase = 0
__lowerCamelCase = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(A__ ) , A__ ) ) ):
__lowerCamelCase, __lowerCamelCase = list(map(A__ , line.split(""",""" ) ) )
if x * logaa(A__ ) > largest:
__lowerCamelCase = x * logaa(A__ )
__lowerCamelCase = i + 1
return result
if __name__ == "__main__":
print(solution())
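# Sketch of the comparison trick used above (the numbers are made up): log10 is
# monotonic and log10(x ** y) == y * log10(x), so ranking y * log10(x) ranks
# x ** y without ever materializing the huge powers.
from math import log10
assert (7 * log10(63_23_82) > 12 * log10(51_94_32)) == (63_23_82**7 > 51_94_32**12)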
| 29
| 1
|
import string
import numpy
def lowerCamelCase__ ( A__ : int , A__ : int ):
'''simple docstring'''
return b if a == 0 else greatest_common_divisor(b % a , A__ )
class lowerCamelCase__:
UpperCAmelCase__ : Optional[int] = string.ascii_uppercase + string.digits
# This cipher takes alphanumerics into account
# i.e. a total of 36 characters
# take x and return x % len(key_string)
UpperCAmelCase__ : Optional[int] = numpy.vectorize(lambda __lowerCamelCase: x % 36)
UpperCAmelCase__ : List[Any] = numpy.vectorize(__lowerCamelCase)
def __init__( self: List[Any] , UpperCamelCase_: numpy.ndarray ):
__lowerCamelCase = self.modulus(UpperCamelCase_ ) # mod36 calc's on the encrypt key
self.check_determinant() # validate the determinant of the encryption key
__lowerCamelCase = encrypt_key.shape[0]
def lowerCAmelCase__ ( self: str , UpperCamelCase_: str ):
return self.key_string.index(UpperCamelCase_ )
def lowerCAmelCase__ ( self: str , UpperCamelCase_: int ):
return self.key_string[round(UpperCamelCase_ )]
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
__lowerCamelCase = det % len(self.key_string )
__lowerCamelCase = len(self.key_string )
if greatest_common_divisor(UpperCamelCase_ , len(self.key_string ) ) != 1:
__lowerCamelCase = (
F'determinant modular {req_l} of encryption key({det}) '
                F'is not coprime w.r.t. {req_l}.\nTry another key.'
)
raise ValueError(UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: str ):
__lowerCamelCase = [char for char in text.upper() if char in self.key_string]
__lowerCamelCase = chars[-1]
while len(UpperCamelCase_ ) % self.break_key != 0:
chars.append(UpperCamelCase_ )
return "".join(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: str ):
__lowerCamelCase = self.process_text(text.upper() )
__lowerCamelCase = """"""
for i in range(0 , len(UpperCamelCase_ ) - self.break_key + 1 , self.break_key ):
__lowerCamelCase = text[i : i + self.break_key]
__lowerCamelCase = [self.replace_letters(UpperCamelCase_ ) for char in batch]
__lowerCamelCase = numpy.array([vec] ).T
            __lowerCamelCase = self.modulus(self.encrypt_key.dot(UpperCamelCase_ ) ).T.tolist()[0]
__lowerCamelCase = """""".join(
self.replace_digits(UpperCamelCase_ ) for num in batch_encrypted )
encrypted += encrypted_batch
return encrypted
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
__lowerCamelCase = det % len(self.key_string )
__lowerCamelCase = None
for i in range(len(self.key_string ) ):
if (det * i) % len(self.key_string ) == 1:
__lowerCamelCase = i
break
__lowerCamelCase = (
det_inv
* numpy.linalg.det(self.encrypt_key )
* numpy.linalg.inv(self.encrypt_key )
)
return self.to_int(self.modulus(UpperCamelCase_ ) )
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: str ):
__lowerCamelCase = self.make_decrypt_key()
__lowerCamelCase = self.process_text(text.upper() )
__lowerCamelCase = """"""
for i in range(0 , len(UpperCamelCase_ ) - self.break_key + 1 , self.break_key ):
__lowerCamelCase = text[i : i + self.break_key]
__lowerCamelCase = [self.replace_letters(UpperCamelCase_ ) for char in batch]
__lowerCamelCase = numpy.array([vec] ).T
__lowerCamelCase = self.modulus(decrypt_key.dot(UpperCamelCase_ ) ).T.tolist()[0]
__lowerCamelCase = """""".join(
self.replace_digits(UpperCamelCase_ ) for num in batch_decrypted )
decrypted += decrypted_batch
return decrypted
def lowerCamelCase__ ( ):
'''simple docstring'''
__lowerCamelCase = int(input("""Enter the order of the encryption key: """ ) )
__lowerCamelCase = []
print("""Enter each row of the encryption key with space separated integers""" )
for _ in range(A__ ):
__lowerCamelCase = [int(A__ ) for x in input().split()]
hill_matrix.append(A__ )
__lowerCamelCase = HillCipher(numpy.array(A__ ) )
print("""Would you like to encrypt or decrypt some text? (1 or 2)""" )
__lowerCamelCase = input("""\n1. Encrypt\n2. Decrypt\n""" )
if option == "1":
__lowerCamelCase = input("""What text would you like to encrypt?: """ )
print("""Your encrypted text is:""" )
print(hc.encrypt(A__ ) )
elif option == "2":
__lowerCamelCase = input("""What text would you like to decrypt?: """ )
print("""Your decrypted text is:""" )
print(hc.decrypt(A__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
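# Small sketch (illustrative keys) of the rule the determinant check above
# enforces: a key matrix is usable exactly when its rounded determinant,
# reduced mod 36, is coprime with 36 (i.e. the matrix is invertible mod 36).
import numpy
from math import gcd
_good = numpy.array([[2, 5], [1, 6]])   # det = 7, gcd(7, 36) == 1 -> accepted
_bad = numpy.array([[2, 4], [1, 6]])    # det = 8, gcd(8, 36) == 4 -> rejected
assert gcd(int(round(numpy.linalg.det(_good))) % 36, 36) == 1
assert gcd(int(round(numpy.linalg.det(_bad))) % 36, 36) != 1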
| 29
|
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def lowerCamelCase__ ( A__ : Tuple , A__ : Optional[int]=0.999 , A__ : Any="cosine" , ):
'''simple docstring'''
if alpha_transform_type == "cosine":
def alpha_bar_fn(A__ : Any ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(A__ : Optional[int] ):
return math.exp(t * -12.0 )
else:
raise ValueError(f'Unsupported alpha_tranform_type: {alpha_transform_type}' )
__lowerCamelCase = []
for i in range(A__ ):
__lowerCamelCase = i / num_diffusion_timesteps
__lowerCamelCase = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(A__ ) / alpha_bar_fn(A__ ) , A__ ) )
return torch.tensor(A__ , dtype=torch.floataa )
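# Illustrative restatement (hypothetical T = 10) of what the helper above returns
# for the "cosine" transform: each beta_i is chosen so the cumulative product of
# (1 - beta) tracks alpha_bar(t) = cos((t + 0.008) / 1.008 * pi / 2) ** 2, and the
# resulting betas grow monotonically up to the 0.999 clamp.
import math
def _alpha_bar(t):
    return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
_T = 10
_betas = [min(1 - _alpha_bar((i + 1) / _T) / _alpha_bar(i / _T), 0.999) for i in range(_T)]
assert all(0 < b <= 0.999 for b in _betas) and _betas == sorted(_betas)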
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase):
UpperCAmelCase__ : List[str] = [e.name for e in KarrasDiffusionSchedulers]
UpperCAmelCase__ : Any = 2
@register_to_config
def __init__( self: List[str] , UpperCamelCase_: int = 10_00 , UpperCamelCase_: float = 0.0_0085 , UpperCamelCase_: float = 0.012 , UpperCamelCase_: str = "linear" , UpperCamelCase_: Optional[Union[np.ndarray, List[float]]] = None , UpperCamelCase_: str = "epsilon" , UpperCamelCase_: str = "linspace" , UpperCamelCase_: int = 0 , ):
if trained_betas is not None:
__lowerCamelCase = torch.tensor(UpperCamelCase_ , dtype=torch.floataa )
elif beta_schedule == "linear":
__lowerCamelCase = torch.linspace(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__lowerCamelCase = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , UpperCamelCase_ , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__lowerCamelCase = betas_for_alpha_bar(UpperCamelCase_ )
else:
raise NotImplementedError(F'{beta_schedule} does is not implemented for {self.__class__}' )
__lowerCamelCase = 1.0 - self.betas
__lowerCamelCase = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: int , UpperCamelCase_: List[str] , UpperCamelCase_: Optional[Any]=None ):
if schedule_timesteps is None:
__lowerCamelCase = self.timesteps
__lowerCamelCase = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
__lowerCamelCase = 1 if len(UpperCamelCase_ ) > 1 else 0
else:
__lowerCamelCase = timestep.cpu().item() if torch.is_tensor(UpperCamelCase_ ) else timestep
__lowerCamelCase = self._index_counter[timestep_int]
return indices[pos].item()
@property
def lowerCAmelCase__ ( self: Optional[int] ):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: Union[float, torch.FloatTensor] , ):
__lowerCamelCase = self.index_for_timestep(UpperCamelCase_ )
if self.state_in_first_order:
__lowerCamelCase = self.sigmas[step_index]
else:
__lowerCamelCase = self.sigmas_interpol[step_index]
__lowerCamelCase = sample / ((sigma**2 + 1) ** 0.5)
return sample
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: int , UpperCamelCase_: Union[str, torch.device] = None , UpperCamelCase_: Optional[int] = None , ):
__lowerCamelCase = num_inference_steps
__lowerCamelCase = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
__lowerCamelCase = np.linspace(0 , num_train_timesteps - 1 , UpperCamelCase_ , dtype=UpperCamelCase_ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
__lowerCamelCase = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_steps is a power of 3
__lowerCamelCase = (np.arange(0 , UpperCamelCase_ ) * step_ratio).round()[::-1].copy().astype(UpperCamelCase_ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
__lowerCamelCase = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_steps is a power of 3
__lowerCamelCase = (np.arange(UpperCamelCase_ , 0 , -step_ratio )).round().copy().astype(UpperCamelCase_ )
timesteps -= 1
else:
raise ValueError(
F'{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.' )
__lowerCamelCase = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
__lowerCamelCase = torch.from_numpy(np.log(UpperCamelCase_ ) ).to(UpperCamelCase_ )
__lowerCamelCase = np.interp(UpperCamelCase_ , np.arange(0 , len(UpperCamelCase_ ) ) , UpperCamelCase_ )
__lowerCamelCase = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
__lowerCamelCase = torch.from_numpy(UpperCamelCase_ ).to(device=UpperCamelCase_ )
# interpolate sigmas
__lowerCamelCase = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
__lowerCamelCase = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
__lowerCamelCase = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(UpperCamelCase_ ).startswith("""mps""" ):
# mps does not support float64
__lowerCamelCase = torch.from_numpy(UpperCamelCase_ ).to(UpperCamelCase_ , dtype=torch.floataa )
else:
__lowerCamelCase = torch.from_numpy(UpperCamelCase_ ).to(UpperCamelCase_ )
# interpolate timesteps
__lowerCamelCase = self.sigma_to_t(UpperCamelCase_ ).to(UpperCamelCase_ , dtype=timesteps.dtype )
__lowerCamelCase = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
__lowerCamelCase = torch.cat([timesteps[:1], interleaved_timesteps] )
__lowerCamelCase = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
__lowerCamelCase = defaultdict(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: str ):
# get log sigma
__lowerCamelCase = sigma.log()
# get distribution
__lowerCamelCase = log_sigma - self.log_sigmas[:, None]
# get sigmas range
__lowerCamelCase = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
__lowerCamelCase = low_idx + 1
__lowerCamelCase = self.log_sigmas[low_idx]
__lowerCamelCase = self.log_sigmas[high_idx]
# interpolate sigmas
__lowerCamelCase = (low - log_sigma) / (low - high)
__lowerCamelCase = w.clamp(0 , 1 )
# transform interpolation to time range
__lowerCamelCase = (1 - w) * low_idx + w * high_idx
__lowerCamelCase = t.view(sigma.shape )
return t
@property
def lowerCAmelCase__ ( self: Dict ):
return self.sample is None
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Union[torch.FloatTensor, np.ndarray] , UpperCamelCase_: Union[float, torch.FloatTensor] , UpperCamelCase_: Union[torch.FloatTensor, np.ndarray] , UpperCamelCase_: bool = True , ):
__lowerCamelCase = self.index_for_timestep(UpperCamelCase_ )
# advance index counter by 1
__lowerCamelCase = timestep.cpu().item() if torch.is_tensor(UpperCamelCase_ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
__lowerCamelCase = self.sigmas[step_index]
__lowerCamelCase = self.sigmas_interpol[step_index + 1]
__lowerCamelCase = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
__lowerCamelCase = self.sigmas[step_index - 1]
__lowerCamelCase = self.sigmas_interpol[step_index]
__lowerCamelCase = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
__lowerCamelCase = 0
__lowerCamelCase = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
__lowerCamelCase = sigma_hat if self.state_in_first_order else sigma_interpol
__lowerCamelCase = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
__lowerCamelCase = sigma_hat if self.state_in_first_order else sigma_interpol
__lowerCamelCase = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError("""prediction_type not implemented yet: sample""" )
else:
raise ValueError(
F'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`' )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
__lowerCamelCase = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
__lowerCamelCase = sigma_interpol - sigma_hat
# store for 2nd order step
__lowerCamelCase = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
__lowerCamelCase = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
__lowerCamelCase = sigma_next - sigma_hat
__lowerCamelCase = self.sample
__lowerCamelCase = None
__lowerCamelCase = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: torch.FloatTensor , ):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
__lowerCamelCase = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(UpperCamelCase_ ):
# mps does not support float64
__lowerCamelCase = self.timesteps.to(original_samples.device , dtype=torch.floataa )
__lowerCamelCase = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
__lowerCamelCase = self.timesteps.to(original_samples.device )
__lowerCamelCase = timesteps.to(original_samples.device )
__lowerCamelCase = [self.index_for_timestep(UpperCamelCase_ , UpperCamelCase_ ) for t in timesteps]
__lowerCamelCase = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
__lowerCamelCase = sigma.unsqueeze(-1 )
__lowerCamelCase = original_samples + noise * sigma
return noisy_samples
def __len__( self: Tuple ):
return self.config.num_train_timesteps
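# Plain-torch sketch (made-up shapes) of the sigma-parameterized noising the
# scheduler's add-noise method above performs: noisy = original + noise * sigma,
# with one sigma per batch element broadcast over the trailing dimensions.
import torch
_xa = torch.zeros(2, 3, 8, 8)
_noise = torch.randn(2, 3, 8, 8)
_sigma = torch.tensor([0.5, 2.0]).view(2, 1, 1, 1)
_noisy = _xa + _noise * _sigma
assert _noisy.shape == _xa.shape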
| 29
| 1
|
import operator
def lowerCamelCase__ ( A__ : list , A__ : bool = False , A__ : list | None = None ):
'''simple docstring'''
__lowerCamelCase = operator.lt if reverse else operator.gt
__lowerCamelCase = solution or []
if not arr:
return solution
__lowerCamelCase = [arr.pop(0 )]
for i, item in enumerate(A__ ):
if _operator(A__ , sublist[-1] ):
sublist.append(A__ )
arr.pop(A__ )
# merging sublist into solution list
if not solution:
solution.extend(A__ )
else:
while sublist:
__lowerCamelCase = sublist.pop(0 )
for i, xx in enumerate(A__ ):
if not _operator(A__ , A__ ):
solution.insert(A__ , A__ )
break
else:
solution.append(A__ )
strand_sort(A__ , A__ , A__ )
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
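# Illustrative trace (assumed, not from the source) of the first pass on
# [4, 3, 5, 1, 2]: the initial increasing "strand" pulled out is [4, 5]; the
# remaining [3, 1, 2] is handled by the recursive call after the merge step.
_arr = [4, 3, 5, 1, 2]
_strand = [_arr.pop(0)]
for _item in list(_arr):
    if _item > _strand[-1]:
        _strand.append(_item)
        _arr.remove(_item)
assert _strand == [4, 5] and _arr == [3, 1, 2]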
| 29
|
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : Union[str, Any] = IFImgaImgSuperResolutionPipeline
UpperCAmelCase__ : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'width', 'height'}
UpperCAmelCase__ : Dict = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'original_image'})
UpperCAmelCase__ : Tuple = PipelineTesterMixin.required_optional_params - {'latents'}
def lowerCAmelCase__ ( self: Optional[int] ):
return self._get_superresolution_dummy_components()
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: Any , UpperCamelCase_: Dict=0 ):
if str(UpperCamelCase_ ).startswith("""mps""" ):
__lowerCamelCase = torch.manual_seed(UpperCamelCase_ )
else:
__lowerCamelCase = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
__lowerCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
__lowerCamelCase = floats_tensor((1, 3, 16, 16) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
__lowerCamelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""original_image""": original_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def lowerCAmelCase__ ( self: Dict ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def lowerCAmelCase__ ( self: int ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def lowerCAmelCase__ ( self: Optional[Any] ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def lowerCAmelCase__ ( self: Optional[Any] ):
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def lowerCAmelCase__ ( self: List[str] ):
self._test_save_load_local()
def lowerCAmelCase__ ( self: List[Any] ):
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 29
| 1
|
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class lowerCamelCase__( __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : List[str] = BertJapaneseTokenizer
UpperCAmelCase__ : Optional[Any] = False
UpperCAmelCase__ : str = True
def lowerCAmelCase__ ( self: str ):
super().setUp()
__lowerCamelCase = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""こんにちは""",
"""こん""",
"""にちは""",
"""ばんは""",
"""##こん""",
"""##にちは""",
"""##ばんは""",
"""世界""",
"""##世界""",
"""、""",
"""##、""",
"""。""",
"""##。""",
]
__lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: Dict ):
__lowerCamelCase = """こんにちは、世界。 \nこんばんは、世界。"""
__lowerCamelCase = """こんにちは 、 世界 。 こんばんは 、 世界 。"""
return input_text, output_text
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: Optional[Any] ):
__lowerCamelCase, __lowerCamelCase = self.get_input_output_texts(UpperCamelCase_ )
__lowerCamelCase = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
__lowerCamelCase = tokenizer.decode(UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ )
return text, ids
def lowerCAmelCase__ ( self: List[Any] ):
pass # TODO add if relevant
def lowerCAmelCase__ ( self: Union[str, Any] ):
pass # TODO add if relevant
def lowerCAmelCase__ ( self: List[Any] ):
pass # TODO add if relevant
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = self.tokenizer_class(self.vocab_file )
__lowerCamelCase = tokenizer.tokenize("""こんにちは、世界。\nこんばんは、世界。""" )
self.assertListEqual(UpperCamelCase_ , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""mecab""" )
self.assertIsNotNone(UpperCamelCase_ )
__lowerCamelCase = """こんにちは、世界。\nこんばんは、世界。"""
__lowerCamelCase = tokenizer.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
__lowerCamelCase = os.path.join(self.tmpdirname , """tokenizer.bin""" )
with open(UpperCamelCase_ , """wb""" ) as handle:
pickle.dump(UpperCamelCase_ , UpperCamelCase_ )
with open(UpperCamelCase_ , """rb""" ) as handle:
__lowerCamelCase = pickle.load(UpperCamelCase_ )
__lowerCamelCase = tokenizer_new.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = MecabTokenizer(mecab_dic="""ipadic""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
def lowerCAmelCase__ ( self: str ):
try:
__lowerCamelCase = MecabTokenizer(mecab_dic="""unidic_lite""" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
def lowerCAmelCase__ ( self: List[Any] ):
try:
__lowerCamelCase = MecabTokenizer(mecab_dic="""unidic""" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = MecabTokenizer(do_lower_case=UpperCamelCase_ , mecab_dic="""ipadic""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iphone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
def lowerCAmelCase__ ( self: int ):
try:
__lowerCamelCase = MecabTokenizer(
do_lower_case=UpperCamelCase_ , normalize_text=UpperCamelCase_ , mecab_option="""-d /usr/local/lib/mecab/dic/jumandic""" )
except RuntimeError:
            # if the dictionary doesn't exist on the system, the code above raises this error.
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
def lowerCAmelCase__ ( self: int ):
__lowerCamelCase = MecabTokenizer(normalize_text=UpperCamelCase_ , mecab_dic="""ipadic""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """ """, """。"""] , )
@require_sudachi
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""sudachi""" )
self.assertIsNotNone(UpperCamelCase_ )
__lowerCamelCase = """こんにちは、世界。\nこんばんは、世界。"""
__lowerCamelCase = tokenizer.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
__lowerCamelCase = os.path.join(self.tmpdirname , """tokenizer.bin""" )
with open(UpperCamelCase_ , """wb""" ) as handle:
pickle.dump(UpperCamelCase_ , UpperCamelCase_ )
with open(UpperCamelCase_ , """rb""" ) as handle:
__lowerCamelCase = pickle.load(UpperCamelCase_ )
__lowerCamelCase = tokenizer_new.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
@require_sudachi
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = SudachiTokenizer(sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iPhone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """ """, """。""", """ """, """ """] , )
@require_sudachi
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""A""" )
self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) , ["""外国""", """人""", """参政""", """権"""] )
@require_sudachi
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""B""" )
self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) , ["""外国人""", """参政権"""] )
@require_sudachi
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""C""" )
self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) , ["""外国人参政権"""] )
@require_sudachi
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = SudachiTokenizer(do_lower_case=UpperCamelCase_ , sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iphone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """ """, """。""", """ """, """ """] , )
@require_sudachi
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = SudachiTokenizer(normalize_text=UpperCamelCase_ , sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iPhone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """\u3000""", """。""", """ """, """ """] , )
@require_sudachi
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = SudachiTokenizer(trim_whitespace=UpperCamelCase_ , sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
@require_jumanpp
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""jumanpp""" )
self.assertIsNotNone(UpperCamelCase_ )
__lowerCamelCase = """こんにちは、世界。\nこんばんは、世界。"""
__lowerCamelCase = tokenizer.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
__lowerCamelCase = os.path.join(self.tmpdirname , """tokenizer.bin""" )
with open(UpperCamelCase_ , """wb""" ) as handle:
pickle.dump(UpperCamelCase_ , UpperCamelCase_ )
with open(UpperCamelCase_ , """rb""" ) as handle:
__lowerCamelCase = pickle.load(UpperCamelCase_ )
__lowerCamelCase = tokenizer_new.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
@require_jumanpp
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
@require_jumanpp
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = JumanppTokenizer(do_lower_case=UpperCamelCase_ )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iphone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
@require_jumanpp
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = JumanppTokenizer(normalize_text=UpperCamelCase_ )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""ア""", """ッ""", """フ""", """゚""", """ル""", """ストア""", """で""", """iPhone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
@require_jumanpp
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = JumanppTokenizer(trim_whitespace=UpperCamelCase_ )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """。"""] , )
@require_jumanpp
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize("""ありがとうございますm(_ _)m見つけるのが大変です。""" ) , ["""ありがとう""", """ございます""", """m(_ _)m""", """見つける""", """の""", """が""", """大変です""", """。"""] , )
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = ["""[UNK]""", """[CLS]""", """[SEP]""", """こんにちは""", """こん""", """にちは""", """ばんは""", """##こん""", """##にちは""", """##ばんは"""]
__lowerCamelCase = {}
for i, token in enumerate(UpperCamelCase_ ):
__lowerCamelCase = i
__lowerCamelCase = WordpieceTokenizer(vocab=UpperCamelCase_ , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""こんにちは""" ) , ["""こんにちは"""] )
self.assertListEqual(tokenizer.tokenize("""こんばんは""" ) , ["""こん""", """##ばんは"""] )
self.assertListEqual(tokenizer.tokenize("""こんばんは こんばんにちは こんにちは""" ) , ["""こん""", """##ばんは""", """[UNK]""", """こんにちは"""] )
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = BertJapaneseTokenizer.from_pretrained("""nlp-waseda/roberta-base-japanese-with-auto-jumanpp""" )
__lowerCamelCase = tokenizer.subword_tokenizer
__lowerCamelCase = subword_tokenizer.tokenize("""国境 の 長い トンネル を 抜ける と 雪国 であった 。""" )
self.assertListEqual(UpperCamelCase_ , ["""▁国境""", """▁の""", """▁長い""", """▁トンネル""", """▁を""", """▁抜ける""", """▁と""", """▁雪""", """国""", """▁であった""", """▁。"""] )
__lowerCamelCase = subword_tokenizer.tokenize("""こんばんは こんばん にち は こんにちは""" )
self.assertListEqual(UpperCamelCase_ , ["""▁こん""", """ばん""", """は""", """▁こん""", """ばん""", """▁に""", """ち""", """▁は""", """▁こんにちは"""] )
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = self.tokenizer_class.from_pretrained("""cl-tohoku/bert-base-japanese""" )
__lowerCamelCase = tokenizer.encode("""ありがとう。""" , add_special_tokens=UpperCamelCase_ )
__lowerCamelCase = tokenizer.encode("""どういたしまして。""" , add_special_tokens=UpperCamelCase_ )
__lowerCamelCase = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ )
__lowerCamelCase = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ , UpperCamelCase_ )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class lowerCamelCase__( __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : List[str] = BertJapaneseTokenizer
UpperCAmelCase__ : List[Any] = False
def lowerCAmelCase__ ( self: str ):
super().setUp()
__lowerCamelCase = ["""[UNK]""", """[CLS]""", """[SEP]""", """こ""", """ん""", """に""", """ち""", """は""", """ば""", """世""", """界""", """、""", """。"""]
__lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def lowerCAmelCase__ ( self: List[Any] , **UpperCamelCase_: List[Any] ):
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type="""character""" , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: List[str] ):
__lowerCamelCase = """こんにちは、世界。 \nこんばんは、世界。"""
__lowerCamelCase = """こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"""
return input_text, output_text
def lowerCAmelCase__ ( self: Tuple ):
pass # TODO add if relevant
def lowerCAmelCase__ ( self: Optional[Any] ):
pass # TODO add if relevant
def lowerCAmelCase__ ( self: List[str] ):
pass # TODO add if relevant
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = self.tokenizer_class(self.vocab_file , subword_tokenizer_type="""character""" )
__lowerCamelCase = tokenizer.tokenize("""こんにちは、世界。 \nこんばんは、世界。""" )
self.assertListEqual(
UpperCamelCase_ , ["""こ""", """ん""", """に""", """ち""", """は""", """、""", """世""", """界""", """。""", """こ""", """ん""", """ば""", """ん""", """は""", """、""", """世""", """界""", """。"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase = ["""[UNK]""", """[CLS]""", """[SEP]""", """こ""", """ん""", """に""", """ち""", """は""", """ば""", """世""", """界""", """、""", """。"""]
__lowerCamelCase = {}
for i, token in enumerate(UpperCamelCase_ ):
__lowerCamelCase = i
__lowerCamelCase = CharacterTokenizer(vocab=UpperCamelCase_ , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""こんにちは""" ) , ["""こ""", """ん""", """に""", """ち""", """は"""] )
self.assertListEqual(tokenizer.tokenize("""こんにちほ""" ) , ["""こ""", """ん""", """に""", """ち""", """[UNK]"""] )
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = self.tokenizer_class.from_pretrained("""cl-tohoku/bert-base-japanese-char""" )
__lowerCamelCase = tokenizer.encode("""ありがとう。""" , add_special_tokens=UpperCamelCase_ )
__lowerCamelCase = tokenizer.encode("""どういたしまして。""" , add_special_tokens=UpperCamelCase_ )
__lowerCamelCase = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ )
__lowerCamelCase = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ , UpperCamelCase_ )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: int ):
__lowerCamelCase = """cl-tohoku/bert-base-japanese"""
__lowerCamelCase = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = """cl-tohoku/bert-base-japanese"""
with self.assertLogs("""transformers""" , level="""WARNING""" ) as cm:
BertTokenizer.from_pretrained(UpperCamelCase_ )
self.assertTrue(
cm.records[0].message.startswith(
"""The tokenizer class you load from this checkpoint is not the same type as the class this function"""
""" is called from.""" ) )
__lowerCamelCase = """bert-base-cased"""
with self.assertLogs("""transformers""" , level="""WARNING""" ) as cm:
BertJapaneseTokenizer.from_pretrained(UpperCamelCase_ )
self.assertTrue(
cm.records[0].message.startswith(
"""The tokenizer class you load from this checkpoint is not the same type as the class this function"""
""" is called from.""" ) )
| 29
|
# A bipartite graph is a graph whose vertices can be divided into two independent sets,
# U and V, such that every edge (u, v) connects a vertex from U to a vertex from V or
# vice versa. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. Equivalently, no edge connects two vertices of the
# same set.
def lowerCamelCase__ ( A__ : Tuple ):
'''simple docstring'''
__lowerCamelCase = [False] * len(A__ )
__lowerCamelCase = [-1] * len(A__ )
def dfs(A__ : Optional[int] , A__ : Optional[int] ):
__lowerCamelCase = True
__lowerCamelCase = c
for u in graph[v]:
if not visited[u]:
dfs(A__ , 1 - c )
for i in range(len(A__ ) ):
if not visited[i]:
dfs(A__ , 0 )
for i in range(len(A__ ) ):
for j in graph[i]:
if color[i] == color[j]:
return False
return True
# Adjacency list of graph
UpperCAmelCase_ = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
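# Self-contained counterexample sketch (illustrative; iterative rather than the
# recursive DFS above): a triangle is an odd cycle, so any 2-coloring forces two
# adjacent vertices to share a color and the check must return False.
def _is_bipartite(adj):
    color = {}
    for src in adj:
        if src in color:
            continue
        color[src] = 0
        stack = [src]
        while stack:
            v = stack.pop()
            for u in adj[v]:
                if u not in color:
                    color[u] = 1 - color[v]
                    stack.append(u)
                elif color[u] == color[v]:
                    return False
    return True
assert _is_bipartite({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []})
assert not _is_bipartite({0: [1, 2], 1: [0, 2], 2: [0, 1]})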
| 29
| 1
|
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {'vocab_file': 'spiece.model'}
UpperCAmelCase_ = {
'vocab_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
}
}
UpperCAmelCase_ = {
'xlnet-base-cased': None,
'xlnet-large-cased': None,
}
# Segments (not really needed)
UpperCAmelCase_ = 0
UpperCAmelCase_ = 1
UpperCAmelCase_ = 2
UpperCAmelCase_ = 3
UpperCAmelCase_ = 4
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Any = VOCAB_FILES_NAMES
UpperCAmelCase__ : str = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : Tuple = 'left'
def __init__( self: Optional[Any] , UpperCamelCase_: List[str] , UpperCamelCase_: Tuple=False , UpperCamelCase_: Dict=True , UpperCamelCase_: List[str]=False , UpperCamelCase_: Dict="<s>" , UpperCamelCase_: Dict="</s>" , UpperCamelCase_: Any="<unk>" , UpperCamelCase_: Optional[int]="<sep>" , UpperCamelCase_: List[str]="<pad>" , UpperCamelCase_: List[Any]="<cls>" , UpperCamelCase_: Union[str, Any]="<mask>" , UpperCamelCase_: List[str]=["<eop>", "<eod>"] , UpperCamelCase_: Optional[Dict[str, Any]] = None , **UpperCamelCase_: str , ):
        # Mask token behaves like a normal word, i.e. includes the space before it
__lowerCamelCase = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token
__lowerCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=UpperCamelCase_ , remove_space=UpperCamelCase_ , keep_accents=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , )
__lowerCamelCase = 3
__lowerCamelCase = do_lower_case
__lowerCamelCase = remove_space
__lowerCamelCase = keep_accents
__lowerCamelCase = vocab_file
__lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCamelCase_ )
@property
def lowerCAmelCase__ ( self: Dict ):
return len(self.sp_model )
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = {self.convert_ids_to_tokens(UpperCamelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self: Any ):
__lowerCamelCase = self.__dict__.copy()
__lowerCamelCase = None
return state
def __setstate__( self: Optional[Any] , UpperCamelCase_: Optional[int] ):
__lowerCamelCase = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__lowerCamelCase = {}
__lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: List[Any] ):
if self.remove_space:
__lowerCamelCase = """ """.join(inputs.strip().split() )
else:
__lowerCamelCase = inputs
__lowerCamelCase = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
__lowerCamelCase = unicodedata.normalize("""NFKD""" , UpperCamelCase_ )
__lowerCamelCase = """""".join([c for c in outputs if not unicodedata.combining(UpperCamelCase_ )] )
if self.do_lower_case:
__lowerCamelCase = outputs.lower()
return outputs
def lowerCAmelCase__ ( self: int , UpperCamelCase_: str ):
__lowerCamelCase = self.preprocess_text(UpperCamelCase_ )
__lowerCamelCase = self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_ )
__lowerCamelCase = []
for piece in pieces:
if len(UpperCamelCase_ ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
__lowerCamelCase = self.sp_model.EncodeAsPieces(piece[:-1].replace(UpperCamelCase_ , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
__lowerCamelCase = cur_pieces[1:]
else:
__lowerCamelCase = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(UpperCamelCase_ )
else:
new_pieces.append(UpperCamelCase_ )
return new_pieces
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: List[Any] ):
return self.sp_model.PieceToId(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Any , UpperCamelCase_: Optional[Any] ):
return self.sp_model.IdToPiece(UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: Union[str, Any] ):
__lowerCamelCase = """""".join(UpperCamelCase_ ).replace(UpperCamelCase_ , """ """ ).strip()
return out_string
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: List[int] , UpperCamelCase_: bool = False , UpperCamelCase_: bool = None , UpperCamelCase_: bool = True , **UpperCamelCase_: Union[str, Any] , ):
__lowerCamelCase = kwargs.pop("""use_source_tokenizer""" , UpperCamelCase_ )
__lowerCamelCase = self.convert_ids_to_tokens(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
        # To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
__lowerCamelCase = []
__lowerCamelCase = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(UpperCamelCase_ ) )
__lowerCamelCase = []
sub_texts.append(UpperCamelCase_ )
else:
current_sub_text.append(UpperCamelCase_ )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(UpperCamelCase_ ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
__lowerCamelCase = """""".join(UpperCamelCase_ )
__lowerCamelCase = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
__lowerCamelCase = self.clean_up_tokenization(UpperCamelCase_ )
return clean_text
else:
return text
def lowerCAmelCase__ ( self: Any , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None ):
__lowerCamelCase = [self.sep_token_id]
__lowerCamelCase = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def lowerCAmelCase__ ( self: int , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None , UpperCamelCase_: bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ )
if token_ids_a is not None:
return ([0] * len(UpperCamelCase_ )) + [1] + ([0] * len(UpperCamelCase_ )) + [1, 1]
return ([0] * len(UpperCamelCase_ )) + [1, 1]
def lowerCAmelCase__ ( self: Any , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None ):
__lowerCamelCase = [self.sep_token_id]
__lowerCamelCase = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def lowerCAmelCase__ ( self: str , UpperCamelCase_: str , UpperCamelCase_: Optional[str] = None ):
if not os.path.isdir(UpperCamelCase_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
__lowerCamelCase = os.path.join(
UpperCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase_ , """wb""" ) as fi:
__lowerCamelCase = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase_ )
return (out_vocab_file,)
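# Sketch with made-up token ids (illustrative) of the layout the special-token
# methods above build: unlike BERT, XLNet puts the classifier token at the END of
# the sequence, and the <cls> position gets its own segment id (2).
_sep, _cls = [9], [8]                # hypothetical <sep> / <cls> ids
_a, _b = [1, 2, 3], [4, 5]
_ids = _a + _sep + _b + _sep + _cls  # pair input: A <sep> B <sep> <cls>
_segments = len(_a + _sep) * [0] + len(_b + _sep) * [1] + [2]
assert len(_ids) == len(_segments) == 8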
| 29
|
from __future__ import annotations
UpperCAmelCase_ = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class lowerCamelCase__:
def __init__( self: Tuple , UpperCamelCase_: dict[str, list[str]] , UpperCamelCase_: str ):
__lowerCamelCase = graph
# mapping node to its parent in resulting breadth first tree
__lowerCamelCase = {}
__lowerCamelCase = source_vertex
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = {self.source_vertex}
__lowerCamelCase = None
__lowerCamelCase = [self.source_vertex] # first in first out queue
while queue:
__lowerCamelCase = queue.pop(0 )
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(UpperCamelCase_ )
__lowerCamelCase = vertex
queue.append(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: str ):
if target_vertex == self.source_vertex:
return self.source_vertex
__lowerCamelCase = self.parent.get(UpperCamelCase_ )
if target_vertex_parent is None:
__lowerCamelCase = (
F'No path from vertex: {self.source_vertex} to vertex: {target_vertex}'
)
raise ValueError(UpperCamelCase_ )
return self.shortest_path(UpperCamelCase_ ) + F'->{target_vertex}'
if __name__ == "__main__":
UpperCAmelCase_ = Graph(graph, 'G')
g.breadth_first_search()
print(g.shortest_path('D'))
print(g.shortest_path('G'))
print(g.shortest_path('Foo'))
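# De-obfuscated sketch of the class above: BFS records each vertex's parent,
# and the shortest path is recovered by walking parents back to the source
# (adjacency dict mirrors the one defined at the top of this snippet).
from collections import deque

def bfs_parents(adj: dict[str, list[str]], source: str) -> dict[str, str]:
    visited, parent, queue = {source}, {}, deque([source])
    while queue:
        vertex = queue.popleft()
        for neighbor in adj[vertex]:
            if neighbor not in visited:
                visited.add(neighbor)
                parent[neighbor] = vertex
                queue.append(neighbor)
    return parent

_adjacency = {
    'A': ['B', 'C', 'E'], 'B': ['A', 'D', 'E'], 'C': ['A', 'F', 'G'],
    'D': ['B'], 'E': ['A', 'B', 'D'], 'F': ['C'], 'G': ['C'],
}
assert bfs_parents(_adjacency, 'G')['D'] == 'B'  # shortest path: G -> C -> A -> B -> D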
| 29
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
UpperCAmelCase_ = {
'configuration_speecht5': [
'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP',
'SpeechT5Config',
'SpeechT5HifiGanConfig',
],
'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'],
'processing_speecht5': ['SpeechT5Processor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ['SpeechT5Tokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'SpeechT5ForSpeechToText',
'SpeechT5ForSpeechToSpeech',
'SpeechT5ForTextToSpeech',
'SpeechT5Model',
'SpeechT5PreTrainedModel',
'SpeechT5HifiGan',
]
if TYPE_CHECKING:
from .configuration_speechta import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechTaConfig,
SpeechTaHifiGanConfig,
)
from .feature_extraction_speechta import SpeechTaFeatureExtractor
from .processing_speechta import SpeechTaProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speechta import SpeechTaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speechta import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaHifiGan,
SpeechTaModel,
SpeechTaPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
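# Hedged usage note: with `_LazyModule`, the submodules listed above are only
# imported on first attribute access, e.g. (assuming torch/sentencepiece are
# installed) `from transformers import SpeechT5Processor` triggers the real
# import of `processing_speecht5` at that moment, keeping `import transformers`
# cheap.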
| 29
|
from math import ceil, sqrt
def lowerCamelCase__ ( A__ : int = 1000000 ):
'''simple docstring'''
__lowerCamelCase = 0
for outer_width in range(3 , (limit // 4) + 2 ):
if outer_width**2 > limit:
__lowerCamelCase = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
else:
__lowerCamelCase = 1
if (outer_width - hole_width_lower_bound) % 2:
hole_width_lower_bound += 1
answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
return answer
if __name__ == "__main__":
print(f"""{solution() = }""")
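# Naive cross-check of the counting logic above (assuming the task is counting
# hollow square laminae that use at most `limit` square tiles): a lamina with
# outer side o and hole side h (same parity, 1 <= h <= o - 2) uses o**2 - h**2
# tiles, and the thinnest lamina (h = o - 2) uses 4*o - 4 of them.
def brute_force(limit: int) -> int:
    count, outer = 0, 3
    while 4 * outer - 4 <= limit:  # thinnest lamina still fits in the budget
        for hole in range(outer - 2, 0, -2):
            if outer * outer - hole * hole > limit:
                break
            count += 1
        outer += 1
    return count

assert brute_force(100) == lowerCamelCase__(100)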
| 29
| 1
|
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def lowerCamelCase__ ( A__ : Namespace ):
'''simple docstring'''
return ConvertCommand(
args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
UpperCAmelCase_ = '\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n'
class lowerCamelCase__( __lowerCamelCase):
@staticmethod
def lowerCAmelCase__ ( UpperCamelCase_: ArgumentParser ):
__lowerCamelCase = parser.add_parser(
"""convert""" , help="""CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.""" , )
train_parser.add_argument("""--model_type""" , type=UpperCamelCase_ , required=UpperCamelCase_ , help="""Model's type.""" )
train_parser.add_argument(
"""--tf_checkpoint""" , type=UpperCamelCase_ , required=UpperCamelCase_ , help="""TensorFlow checkpoint path or folder.""" )
train_parser.add_argument(
"""--pytorch_dump_output""" , type=UpperCamelCase_ , required=UpperCamelCase_ , help="""Path to the PyTorch saved model output.""" )
train_parser.add_argument("""--config""" , type=UpperCamelCase_ , default="""""" , help="""Configuration file path or folder.""" )
train_parser.add_argument(
"""--finetuning_task_name""" , type=UpperCamelCase_ , default=UpperCamelCase_ , help="""Optional fine-tuning task name if the TF model was a finetuned model.""" , )
train_parser.set_defaults(func=UpperCamelCase_ )
def __init__( self: List[str] , UpperCamelCase_: str , UpperCamelCase_: str , UpperCamelCase_: str , UpperCamelCase_: str , UpperCamelCase_: str , *UpperCamelCase_: Optional[Any] , ):
__lowerCamelCase = logging.get_logger("""transformers-cli/converting""" )
self._logger.info(F'Loading model {model_type}' )
__lowerCamelCase = model_type
__lowerCamelCase = tf_checkpoint
__lowerCamelCase = pytorch_dump_output
__lowerCamelCase = config
__lowerCamelCase = finetuning_task_name
def lowerCAmelCase__ ( self: Any ):
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(UpperCamelCase_ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(UpperCamelCase_ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(UpperCamelCase_ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(UpperCamelCase_ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(UpperCamelCase_ )
if "ckpt" in self._tf_checkpoint.lower():
__lowerCamelCase = self._tf_checkpoint
__lowerCamelCase = """"""
else:
__lowerCamelCase = self._tf_checkpoint
__lowerCamelCase = """"""
convert_transfo_xl_checkpoint_to_pytorch(
UpperCamelCase_ , self._config , self._pytorch_dump_output , UpperCamelCase_ )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(UpperCamelCase_ )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(UpperCamelCase_ )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
"""--model_type should be selected in the list [albert, bert, funnel, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert, rembert]""" )
| 29
|
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : Optional[int] = IFInpaintingPipeline
UpperCAmelCase__ : int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
UpperCAmelCase__ : Dict = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
UpperCAmelCase__ : List[str] = PipelineTesterMixin.required_optional_params - {'latents'}
def lowerCAmelCase__ ( self: List[str] ):
return self._get_dummy_components()
def lowerCAmelCase__ ( self: int , UpperCamelCase_: Dict , UpperCamelCase_: str=0 ):
if str(UpperCamelCase_ ).startswith("""mps""" ):
__lowerCamelCase = torch.manual_seed(UpperCamelCase_ )
else:
__lowerCamelCase = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
__lowerCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
__lowerCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
__lowerCamelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def lowerCAmelCase__ ( self: Union[str, Any] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def lowerCAmelCase__ ( self: Union[str, Any] ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def lowerCAmelCase__ ( self: Optional[int] ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def lowerCAmelCase__ ( self: Any ):
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def lowerCAmelCase__ ( self: str ):
self._test_save_load_local()
def lowerCAmelCase__ ( self: str ):
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
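# Minimal sketch of the device-aware seeding pattern used by the inputs helper
# above: MPS does not accept a device-bound `torch.Generator`, so the tests
# fall back to seeding the global RNG there.
import torch

def make_generator(device, seed: int = 0):
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)  # returns the default (global) generator
    return torch.Generator(device=device).manual_seed(seed)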
| 29
| 1
|
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'microsoft/xprophetnet-large-wiki100-cased': (
'https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json'
),
}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : str = 'xlm-prophetnet'
UpperCAmelCase__ : Tuple = ['past_key_values']
UpperCAmelCase__ : Union[str, Any] = {
'num_attention_heads': 'num_encoder_attention_heads',
}
def __init__( self: Optional[Any] , UpperCamelCase_: Optional[float] = 0.1 , UpperCamelCase_: Optional[Union[str, Callable]] = "gelu" , UpperCamelCase_: Optional[int] = 3_05_22 , UpperCamelCase_: Optional[int] = 10_24 , UpperCamelCase_: Optional[int] = 40_96 , UpperCamelCase_: Optional[int] = 12 , UpperCamelCase_: Optional[int] = 16 , UpperCamelCase_: Optional[int] = 40_96 , UpperCamelCase_: Optional[int] = 12 , UpperCamelCase_: Optional[int] = 16 , UpperCamelCase_: Optional[float] = 0.1 , UpperCamelCase_: Optional[float] = 0.1 , UpperCamelCase_: Optional[int] = 5_12 , UpperCamelCase_: Optional[float] = 0.02 , UpperCamelCase_: Optional[bool] = True , UpperCamelCase_: Optional[bool] = True , UpperCamelCase_: Optional[int] = 0 , UpperCamelCase_: Optional[int] = 2 , UpperCamelCase_: Optional[int] = 32 , UpperCamelCase_: Optional[int] = 1_28 , UpperCamelCase_: Optional[bool] = False , UpperCamelCase_: Optional[float] = 0.0 , UpperCamelCase_: Optional[bool] = True , UpperCamelCase_: Optional[int] = 0 , UpperCamelCase_: Optional[int] = 1 , UpperCamelCase_: Optional[int] = 2 , **UpperCamelCase_: str , ):
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = encoder_ffn_dim
__lowerCamelCase = num_encoder_layers
__lowerCamelCase = num_encoder_attention_heads
__lowerCamelCase = decoder_ffn_dim
__lowerCamelCase = num_decoder_layers
__lowerCamelCase = num_decoder_attention_heads
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = init_std # Normal(0, this parameter)
__lowerCamelCase = activation_function
# parameters for xlmprophetnet
__lowerCamelCase = ngram
__lowerCamelCase = num_buckets
__lowerCamelCase = relative_max_distance
__lowerCamelCase = disable_ngram_loss
__lowerCamelCase = eps
# 3 Types of Dropout
__lowerCamelCase = attention_dropout
__lowerCamelCase = activation_dropout
__lowerCamelCase = dropout
__lowerCamelCase = use_cache
super().__init__(
pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , is_encoder_decoder=UpperCamelCase_ , add_cross_attention=UpperCamelCase_ , decoder_start_token_id=UpperCamelCase_ , **UpperCamelCase_ , )
@property
def lowerCAmelCase__ ( self: Dict ):
return self.num_encoder_layers + self.num_decoder_layers
@num_hidden_layers.setter
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: List[str] ):
raise NotImplementedError(
"""This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"""
""" `num_decoder_layers`.""" )
| 29
|
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
UpperCAmelCase_ = logging.get_logger(__name__)
@add_end_docstrings(__lowerCamelCase)
class lowerCamelCase__( __lowerCamelCase):
def __init__( self: str , **UpperCamelCase_: int ):
super().__init__(**UpperCamelCase_ )
if self.framework == "tf":
raise ValueError(F'The {self.__class__} is only available in PyTorch.' )
requires_backends(self , """vision""" )
self.check_model_type(UpperCamelCase_ )
def __call__( self: Union[str, Any] , UpperCamelCase_: Union[str, "Image.Image", List[Dict[str, Any]]] , UpperCamelCase_: Union[str, List[str]] = None , **UpperCamelCase_: List[str] , ):
if "text_queries" in kwargs:
__lowerCamelCase = kwargs.pop("""text_queries""" )
if isinstance(UpperCamelCase_ , (str, Image.Image) ):
__lowerCamelCase = {"""image""": image, """candidate_labels""": candidate_labels}
else:
__lowerCamelCase = image
__lowerCamelCase = super().__call__(UpperCamelCase_ , **UpperCamelCase_ )
return results
def lowerCAmelCase__ ( self: List[str] , **UpperCamelCase_: Dict ):
__lowerCamelCase = {}
if "threshold" in kwargs:
__lowerCamelCase = kwargs["""threshold"""]
if "top_k" in kwargs:
__lowerCamelCase = kwargs["""top_k"""]
return {}, {}, postprocess_params
def lowerCAmelCase__ ( self: Any , UpperCamelCase_: Optional[Any] ):
__lowerCamelCase = load_image(inputs["""image"""] )
__lowerCamelCase = inputs["""candidate_labels"""]
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = candidate_labels.split(""",""" )
__lowerCamelCase = torch.tensor([[image.height, image.width]] , dtype=torch.intaa )
for i, candidate_label in enumerate(UpperCamelCase_ ):
__lowerCamelCase = self.tokenizer(UpperCamelCase_ , return_tensors=self.framework )
__lowerCamelCase = self.image_processor(UpperCamelCase_ , return_tensors=self.framework )
yield {
"is_last": i == len(UpperCamelCase_ ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Tuple ):
__lowerCamelCase = model_inputs.pop("""target_size""" )
__lowerCamelCase = model_inputs.pop("""candidate_label""" )
__lowerCamelCase = model_inputs.pop("""is_last""" )
__lowerCamelCase = self.model(**UpperCamelCase_ )
__lowerCamelCase = {"""target_size""": target_size, """candidate_label""": candidate_label, """is_last""": is_last, **outputs}
return model_outputs
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Any , UpperCamelCase_: Dict=0.1 , UpperCamelCase_: Union[str, Any]=None ):
__lowerCamelCase = []
for model_output in model_outputs:
__lowerCamelCase = model_output["""candidate_label"""]
__lowerCamelCase = BaseModelOutput(UpperCamelCase_ )
__lowerCamelCase = self.image_processor.post_process_object_detection(
outputs=UpperCamelCase_ , threshold=UpperCamelCase_ , target_sizes=model_output["""target_size"""] )[0]
for index in outputs["scores"].nonzero():
__lowerCamelCase = outputs["""scores"""][index].item()
__lowerCamelCase = self._get_bounding_box(outputs["""boxes"""][index][0] )
__lowerCamelCase = {"""score""": score, """label""": label, """box""": box}
results.append(UpperCamelCase_ )
__lowerCamelCase = sorted(UpperCamelCase_ , key=lambda UpperCamelCase_ : UpperCamelCase_["score"] , reverse=UpperCamelCase_ )
if top_k:
__lowerCamelCase = results[:top_k]
return results
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: "torch.Tensor" ):
if self.framework != "pt":
raise ValueError("""The ZeroShotObjectDetectionPipeline is only available in PyTorch.""" )
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = box.int().tolist()
__lowerCamelCase = {
"""xmin""": xmin,
"""ymin""": ymin,
"""xmax""": xmax,
"""ymax""": ymax,
}
return bbox
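# Hedged usage sketch (the checkpoint name is an assumption; any model that
# supports the `zero-shot-object-detection` task should work; requires network):
#
#   from transformers import pipeline
#   detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
#   detector(
#       "http://images.cocodataset.org/val2017/000000039769.jpg",
#       candidate_labels=["cat", "remote control"],
#   )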
| 29
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/config.json',
'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json',
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/config.json',
'funnel-transformer/medium-base': 'https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json',
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/config.json',
'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json',
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json',
'funnel-transformer/xlarge-base': 'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json',
}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Tuple = 'funnel'
UpperCAmelCase__ : Optional[Any] = {
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
}
def __init__( self: str , UpperCamelCase_: Optional[Any]=3_05_22 , UpperCamelCase_: Optional[Any]=[4, 4, 4] , UpperCamelCase_: List[str]=None , UpperCamelCase_: Optional[int]=2 , UpperCamelCase_: Optional[Any]=7_68 , UpperCamelCase_: Optional[int]=12 , UpperCamelCase_: Optional[Any]=64 , UpperCamelCase_: Any=30_72 , UpperCamelCase_: Optional[Any]="gelu_new" , UpperCamelCase_: List[str]=0.1 , UpperCamelCase_: Optional[Any]=0.1 , UpperCamelCase_: Union[str, Any]=0.0 , UpperCamelCase_: Optional[int]=0.1 , UpperCamelCase_: Dict=None , UpperCamelCase_: Optional[int]=1E-9 , UpperCamelCase_: Optional[Any]="mean" , UpperCamelCase_: Any="relative_shift" , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: Any=True , UpperCamelCase_: Union[str, Any]=True , **UpperCamelCase_: List[str] , ):
__lowerCamelCase = vocab_size
__lowerCamelCase = block_sizes
__lowerCamelCase = [1] * len(UpperCamelCase_ ) if block_repeats is None else block_repeats
assert len(UpperCamelCase_ ) == len(
self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
__lowerCamelCase = num_decoder_layers
__lowerCamelCase = d_model
__lowerCamelCase = n_head
__lowerCamelCase = d_head
__lowerCamelCase = d_inner
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout
__lowerCamelCase = attention_dropout
__lowerCamelCase = activation_dropout
__lowerCamelCase = initializer_range
__lowerCamelCase = initializer_std
__lowerCamelCase = layer_norm_eps
assert pooling_type in [
"mean",
"max",
], F'Got {pooling_type} for `pooling_type` but only \'mean\' and \'max\' are supported.'
__lowerCamelCase = pooling_type
assert attention_type in [
"relative_shift",
"factorized",
], F'Got {attention_type} for `attention_type` but only \'relative_shift\' and \'factorized\' are supported.'
__lowerCamelCase = attention_type
__lowerCamelCase = separate_cls
__lowerCamelCase = truncate_seq
__lowerCamelCase = pool_q_only
super().__init__(**UpperCamelCase_ )
@property
def lowerCAmelCase__ ( self: Tuple ):
return sum(self.block_sizes )
@num_hidden_layers.setter
def lowerCAmelCase__ ( self: str , UpperCamelCase_: Optional[Any] ):
raise NotImplementedError(
"""This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.""" )
@property
def lowerCAmelCase__ ( self: Optional[int] ):
return len(self.block_sizes )
@num_blocks.setter
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: Tuple ):
raise NotImplementedError("""This model does not support the setting of `num_blocks`. Please set `block_sizes`.""" )
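# Minimal sketch of the block bookkeeping above: `num_hidden_layers` is the sum
# of the per-block sizes, `num_blocks` their count, and `block_repeats`
# defaults to one repeat per block (values mirror the config defaults).
block_sizes = [4, 4, 4]
block_repeats = [1] * len(block_sizes)
assert len(block_sizes) == len(block_repeats)
assert sum(block_sizes) == 12  # num_hidden_layers
assert len(block_sizes) == 3   # num_blocks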
| 29
|
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase_ = get_tests_dir('fixtures/test_sentencepiece.model')
UpperCAmelCase_ = {'target_lang': 'fi', 'source_lang': 'en'}
UpperCAmelCase_ = '>>zh<<'
UpperCAmelCase_ = 'Helsinki-NLP/'
if is_torch_available():
UpperCAmelCase_ = 'pt'
elif is_tf_available():
UpperCAmelCase_ = 'tf'
else:
UpperCAmelCase_ = 'jax'
@require_sentencepiece
class lowerCamelCase__( __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : Union[str, Any] = MarianTokenizer
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : int = True
def lowerCAmelCase__ ( self: Union[str, Any] ):
super().setUp()
__lowerCamelCase = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
__lowerCamelCase = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
__lowerCamelCase = Path(self.tmpdirname )
save_json(UpperCamelCase_ , save_dir / VOCAB_FILES_NAMES["""vocab"""] )
save_json(UpperCamelCase_ , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(UpperCamelCase_ , save_dir / VOCAB_FILES_NAMES["""source_spm"""] )
copyfile(UpperCamelCase_ , save_dir / VOCAB_FILES_NAMES["""target_spm"""] )
__lowerCamelCase = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase__ ( self: Optional[Any] , **UpperCamelCase_: Any ):
return MarianTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: Optional[int] ):
return (
"This is a test",
"This is a test",
)
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = """</s>"""
__lowerCamelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase_ ) , UpperCamelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase_ ) , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """</s>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """<pad>""" )
self.assertEqual(len(UpperCamelCase_ ) , 9 )
def lowerCAmelCase__ ( self: Tuple ):
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = MarianTokenizer.from_pretrained(F'{ORG_NAME}opus-mt-en-de' )
__lowerCamelCase = en_de_tokenizer(["""I am a small frog"""] , return_tensors=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = [38, 1_21, 14, 6_97, 3_88_48, 0]
self.assertListEqual(UpperCamelCase_ , batch.input_ids[0] )
__lowerCamelCase = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(UpperCamelCase_ )
__lowerCamelCase = [x.name for x in Path(UpperCamelCase_ ).glob("""*""" )]
self.assertIn("""source.spm""" , UpperCamelCase_ )
MarianTokenizer.from_pretrained(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = tok(
["""I am a small frog""" * 10_00, """I am a small frog"""] , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(batch.input_ids.shape , (2, 5_12) )
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = tok(["""I am a tiny frog""", """I am a small frog"""] , padding=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
@slow
def lowerCAmelCase__ ( self: Optional[int] ):
# fmt: off
__lowerCamelCase = {"""input_ids""": [[4_34_95, 4_62, 20, 4_21_64, 13_69, 52, 4_64, 1_32, 17_03, 4_92, 13, 74_91, 3_89_99, 6, 8, 4_64, 1_32, 17_03, 4_92, 13, 46_69, 3_78_67, 13, 75_25, 27, 15_93, 9_88, 13, 3_39_72, 70_29, 6, 20, 82_51, 3_83, 2, 2_70, 58_66, 37_88, 2, 23_53, 82_51, 1_23_38, 2, 1_39_58, 3_87, 2, 36_29, 69_53, 1_88, 29_00, 2, 1_39_58, 80_11, 1_15_01, 23, 84_60, 40_73, 3_40_09, 20, 4_35, 1_14_39, 27, 8, 84_60, 40_73, 60_04, 20, 99_88, 3_75, 27, 33, 2_66, 19_45, 10_76, 13_50, 3_78_67, 32_88, 5, 5_77, 10_76, 43_74, 8, 50_82, 5, 2_64_53, 2_57, 5_56, 4_03, 2, 2_42, 1_32, 3_83, 3_16, 4_92, 8, 1_07_67, 6, 3_16, 3_04, 42_39, 3, 0], [1_48, 1_57_22, 19, 18_39, 12, 13_50, 13, 2_23_27, 50_82, 54_18, 4_75_67, 3_59_38, 59, 3_18, 1_95_52, 1_08, 21_83, 54, 1_49_76, 48_35, 32, 5_47, 11_14, 8, 3_15, 24_17, 5, 92, 1_90_88, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00], [36, 63_95, 1_25_70, 3_91_47, 1_15_97, 6, 2_66, 4, 4_54_05, 72_96, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase_ , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , )
def lowerCAmelCase__ ( self: int ):
__lowerCamelCase = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" )
__lowerCamelCase = """Tämä on testi"""
__lowerCamelCase = """This is a test"""
__lowerCamelCase = [76, 7, 20_47, 2]
__lowerCamelCase = [69, 12, 11, 9_40, 2]
__lowerCamelCase = tokenizer(UpperCamelCase_ ).input_ids
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = tokenizer(text_target=UpperCamelCase_ ).input_ids
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = tokenizer.decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
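# Hedged sketch of the two-vocab pattern exercised above: Marian tokenizers keep
# separate SentencePiece models for source and target text, so target strings go
# through `text_target=` (checkpoint is the one used in the test; needs network):
#
#   tok = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")
#   src_ids = tok("Tämä on testi").input_ids                # source vocab
#   tgt_ids = tok(text_target="This is a test").input_ids   # target vocab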
| 29
| 1
|
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
UpperCAmelCase_ = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
UpperCAmelCase_ = direct_transformers_import(PATH_TO_TRANSFORMERS)
UpperCAmelCase_ = transformers.models.auto.configuration_auto.CONFIG_MAPPING
UpperCAmelCase_ = {
# used to compute the property `self.chunk_length`
'EncodecConfig': ['overlap'],
# used as `self.bert_model = BertModel(config, ...)`
'DPRConfig': True,
# not used in modeling files, but it's important information
'FSMTConfig': ['langs'],
# used internally in the configuration class file
'GPTNeoConfig': ['attention_types'],
# used internally in the configuration class file
'EsmConfig': ['is_folding_model'],
# used during training (even though we don't have a training script for these models yet)
'Mask2FormerConfig': ['ignore_value'],
# `ignore_value` used during training (even though we don't have a training script for these models yet)
# `norm` used in conversion script (despite not being used in the modeling file)
'OneFormerConfig': ['ignore_value', 'norm'],
# used during preprocessing and collation, see `collating_graphormer.py`
'GraphormerConfig': ['spatial_pos_max'],
# used internally in the configuration class file
'T5Config': ['feed_forward_proj'],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
'MT5Config': ['feed_forward_proj', 'tokenizer_class'],
'UMT5Config': ['feed_forward_proj', 'tokenizer_class'],
# used internally in the configuration class file
'LongT5Config': ['feed_forward_proj'],
# used internally in the configuration class file
'SwitchTransformersConfig': ['feed_forward_proj'],
# having default values other than `1e-5` - we can't fix them without breaking
'BioGptConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'GLPNConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'SegformerConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'CvtConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'PerceiverConfig': ['layer_norm_eps'],
# used internally to calculate the feature size
'InformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'TimeSeriesTransformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'AutoformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate `mlp_dim`
'SamVisionConfig': ['mlp_ratio'],
# For (head) training, but so far not implemented
'ClapAudioConfig': ['num_classes'],
# Not used, but providing useful information to users
'SpeechT5HifiGanConfig': ['sampling_rate'],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
'CLIPSegConfig': True,
'DeformableDetrConfig': True,
'DetaConfig': True,
'DinatConfig': True,
'DonutSwinConfig': True,
'EfficientFormerConfig': True,
'FSMTConfig': True,
'JukeboxConfig': True,
'LayoutLMv2Config': True,
'MaskFormerSwinConfig': True,
'MT5Config': True,
'NatConfig': True,
'OneFormerConfig': True,
'PerceiverConfig': True,
'RagConfig': True,
'SpeechT5Config': True,
'SwinConfig': True,
'Swin2SRConfig': True,
'Swinv2Config': True,
'SwitchTransformersConfig': True,
'TableTransformerConfig': True,
'TapasConfig': True,
'TransfoXLConfig': True,
'UniSpeechConfig': True,
'UniSpeechSatConfig': True,
'WavLMConfig': True,
'WhisperConfig': True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
'JukeboxPriorConfig': True,
# TODO: @Younes (for `is_decoder`)
'Pix2StructTextConfig': True,
}
)
def lowerCamelCase__ ( A__ : Tuple , A__ : List[Any] , A__ : int , A__ : int ):
'''simple docstring'''
__lowerCamelCase = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
f'config.{attribute}' in modeling_source
or f'getattr(config, "{attribute}"' in modeling_source
or f'getattr(self.config, "{attribute}"' in modeling_source
):
__lowerCamelCase = True
# Deal with multi-line cases
elif (
re.search(
Rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"' , A__ , )
is not None
):
__lowerCamelCase = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
__lowerCamelCase = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
__lowerCamelCase = [
"""bos_index""",
"""eos_index""",
"""pad_index""",
"""unk_index""",
"""mask_index""",
"""image_size""",
"""use_cache""",
"""out_features""",
"""out_indices""",
]
__lowerCamelCase = ["""encoder_no_repeat_ngram_size"""]
# Special cases to be allowed
__lowerCamelCase = True
if not attribute_used:
__lowerCamelCase = False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
__lowerCamelCase = True
elif attribute in ["tie_word_embeddings"] and default_value is False:
__lowerCamelCase = True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
__lowerCamelCase = True
elif attribute.endswith("""_token_id""" ):
__lowerCamelCase = True
# configuration class specific cases
if not case_allowed:
__lowerCamelCase = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
__lowerCamelCase = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
def lowerCamelCase__ ( A__ : Union[str, Any] ):
'''simple docstring'''
__lowerCamelCase = dict(inspect.signature(config_class.__init__ ).parameters )
__lowerCamelCase = [x for x in list(signature.keys() ) if x not in ["""self""", """kwargs"""]]
__lowerCamelCase = [signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
__lowerCamelCase = {}
if len(config_class.attribute_map ) > 0:
__lowerCamelCase = {v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
__lowerCamelCase = inspect.getsourcefile(A__ )
__lowerCamelCase = os.path.dirname(A__ )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
__lowerCamelCase = [os.path.join(A__ , A__ ) for fn in os.listdir(A__ ) if fn.startswith("""modeling_""" )]
# Get the source code strings
__lowerCamelCase = []
for path in modeling_paths:
if os.path.isfile(A__ ):
with open(A__ ) as fp:
modeling_sources.append(fp.read() )
__lowerCamelCase = []
for config_param, default_value in zip(A__ , A__ ):
# `attributes` here is all the variant names for `config_param`
__lowerCamelCase = [config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
if not check_attribute_being_used(A__ , A__ , A__ , A__ ):
unused_attributes.append(attributes[0] )
return sorted(A__ )
def lowerCamelCase__ ( ):
'''simple docstring'''
__lowerCamelCase = {}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
__lowerCamelCase = [
cls
for name, cls in inspect.getmembers(
inspect.getmodule(_config_class ) , lambda A__ : inspect.isclass(A__ )
and issubclass(A__ , A__ )
and inspect.getmodule(A__ ) == inspect.getmodule(_config_class ) , )
]
for config_class in config_classes_in_module:
__lowerCamelCase = check_config_attributes_being_used(A__ )
if len(A__ ) > 0:
__lowerCamelCase = unused_attributes
if len(A__ ) > 0:
__lowerCamelCase = """The following configuration classes contain unused attributes in the corresponding modeling files:\n"""
for name, attributes in configs_with_unused_attributes.items():
error += f'{name}: {attributes}\n'
raise ValueError(A__ )
if __name__ == "__main__":
check_config_attributes()
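# Quick sanity check of the multi-line `getattr` pattern matched in
# `check_attribute_being_used` above (attribute name is illustrative only):
import re

snippet = 'getattr(\n    self.config,\n    "hidden_size",\n)'
pattern = r'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"hidden_size"'
assert re.search(pattern, snippet) is not None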
| 29
|
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class lowerCamelCase__( unittest.TestCase):
@parameterized.expand([(None,), ("""foo.json""",)] )
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: List[str] ):
__lowerCamelCase = GenerationConfig(
do_sample=UpperCamelCase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCamelCase_ , config_name=UpperCamelCase_ )
__lowerCamelCase = GenerationConfig.from_pretrained(UpperCamelCase_ , config_name=UpperCamelCase_ )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , UpperCamelCase_ )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = AutoConfig.from_pretrained("""gpt2""" )
__lowerCamelCase = GenerationConfig.from_model_config(UpperCamelCase_ )
__lowerCamelCase = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(UpperCamelCase_ , UpperCamelCase_ )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = GenerationConfig()
__lowerCamelCase = {
"""max_new_tokens""": 10_24,
"""foo""": """bar""",
}
__lowerCamelCase = copy.deepcopy(UpperCamelCase_ )
__lowerCamelCase = generation_config.update(**UpperCamelCase_ )
# update_kwargs was not modified (no side effects)
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 10_24 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(UpperCamelCase_ , {"""foo""": """bar"""} )
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = GenerationConfig()
__lowerCamelCase = """bar"""
with tempfile.TemporaryDirectory("""test-generation-config""" ) as tmp_dir:
generation_config.save_pretrained(UpperCamelCase_ )
__lowerCamelCase = GenerationConfig.from_pretrained(UpperCamelCase_ )
# the custom attribute survives the save/load round-trip
self.assertEqual(new_config.foo , """bar""" )
__lowerCamelCase = GenerationConfig.from_model_config(UpperCamelCase_ )
assert not hasattr(UpperCamelCase_ , """foo""" ) # no new kwargs should be initialized if from config
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , UpperCamelCase_ )
self.assertEqual(default_config.num_beams , 1 )
__lowerCamelCase = GenerationConfig(
do_sample=UpperCamelCase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , UpperCamelCase_ )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCamelCase_ )
__lowerCamelCase = GenerationConfig.from_pretrained(UpperCamelCase_ , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , UpperCamelCase_ )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class lowerCamelCase__( unittest.TestCase):
@classmethod
def lowerCAmelCase__ ( cls: Optional[Any] ):
__lowerCamelCase = TOKEN
HfFolder.save_token(UpperCamelCase_ )
@classmethod
def lowerCAmelCase__ ( cls: str ):
try:
delete_repo(token=cls._token , repo_id="""test-generation-config""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-generation-config-org""" )
except HTTPError:
pass
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = GenerationConfig(
do_sample=UpperCamelCase_ , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""test-generation-config""" , use_auth_token=self._token )
__lowerCamelCase = GenerationConfig.from_pretrained(F'{USER}/test-generation-config' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-generation-config""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
UpperCamelCase_ , repo_id="""test-generation-config""" , push_to_hub=UpperCamelCase_ , use_auth_token=self._token )
__lowerCamelCase = GenerationConfig.from_pretrained(F'{USER}/test-generation-config' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_ ) )
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = GenerationConfig(
do_sample=UpperCamelCase_ , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""valid_org/test-generation-config-org""" , use_auth_token=self._token )
__lowerCamelCase = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-generation-config-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
UpperCamelCase_ , repo_id="""valid_org/test-generation-config-org""" , push_to_hub=UpperCamelCase_ , use_auth_token=self._token )
__lowerCamelCase = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_ ) )
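# Minimal sketch of the `.update()` contract exercised in the tests above:
# known attributes are set in place and unknown keys are handed back to the
# caller (uses the `GenerationConfig` imported at the top of this snippet).
sketch_config = GenerationConfig()
sketch_unused = sketch_config.update(max_new_tokens=1024, foo="bar")
assert sketch_config.max_new_tokens == 1024
assert sketch_unused == {"foo": "bar"}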
| 29
| 1
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ = logging.get_logger(__name__)
def lowerCamelCase__ ( A__ : List[str] ):
'''simple docstring'''
if "resnet-50" in model_name:
__lowerCamelCase = ResNetConfig.from_pretrained("""microsoft/resnet-50""" )
elif "resnet-101" in model_name:
__lowerCamelCase = ResNetConfig.from_pretrained("""microsoft/resnet-101""" )
else:
raise ValueError("""Model name should include either resnet50 or resnet101""" )
__lowerCamelCase = DetrConfig(use_timm_backbone=A__ , backbone_config=A__ )
# set label attributes
__lowerCamelCase = """panoptic""" in model_name
if is_panoptic:
__lowerCamelCase = 250
else:
__lowerCamelCase = 91
__lowerCamelCase = """huggingface/label-files"""
__lowerCamelCase = """coco-detection-id2label.json"""
__lowerCamelCase = json.load(open(hf_hub_download(A__ , A__ , repo_type="""dataset""" ) , """r""" ) )
__lowerCamelCase = {int(A__ ): v for k, v in idalabel.items()}
__lowerCamelCase = idalabel
__lowerCamelCase = {v: k for k, v in idalabel.items()}
return config, is_panoptic
def lowerCamelCase__ ( A__ : Union[str, Any] ):
'''simple docstring'''
__lowerCamelCase = []
# stem
# fmt: off
rename_keys.append(("""backbone.0.body.conv1.weight""", """backbone.conv_encoder.model.embedder.embedder.convolution.weight""") )
rename_keys.append(("""backbone.0.body.bn1.weight""", """backbone.conv_encoder.model.embedder.embedder.normalization.weight""") )
rename_keys.append(("""backbone.0.body.bn1.bias""", """backbone.conv_encoder.model.embedder.embedder.normalization.bias""") )
rename_keys.append(("""backbone.0.body.bn1.running_mean""", """backbone.conv_encoder.model.embedder.embedder.normalization.running_mean""") )
rename_keys.append(("""backbone.0.body.bn1.running_var""", """backbone.conv_encoder.model.embedder.embedder.normalization.running_var""") )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
f'transformer.encoder.layers.{i}.self_attn.out_proj.weight',
f'encoder.layers.{i}.self_attn.out_proj.weight',
) )
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias') )
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
f'transformer.decoder.layers.{i}.self_attn.out_proj.weight',
f'decoder.layers.{i}.self_attn.out_proj.weight',
) )
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append(
(
f'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight',
f'decoder.layers.{i}.encoder_attn.out_proj.weight',
) )
rename_keys.append(
(
f'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias',
f'decoder.layers.{i}.encoder_attn.out_proj.bias',
) )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
] )
return rename_keys
def lowerCamelCase__ ( A__ : Tuple , A__ : List[str] , A__ : int ):
'''simple docstring'''
__lowerCamelCase = state_dict.pop(A__ )
__lowerCamelCase = val
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
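# Illustration (a minimal sketch, not used by the conversion itself): PyTorch's
# nn.MultiheadAttention keeps query/key/value in one fused (3*d, d)
# `in_proj_weight`, which is why the slices above are [:256], [256:512] and
# [-256:] for DETR's hidden size of 256. The function name and default are ours.
def _split_fused_qkv_example(hidden_size: int = 256):
    fused = torch.randn(3 * hidden_size, hidden_size)
    q = fused[:hidden_size, :]
    k = fused[hidden_size : 2 * hidden_size, :]
    v = fused[-hidden_size:, :]
    # stacking the three slices back together recovers the fused matrix
    assert torch.equal(torch.cat([q, k, v]), fused)
    return q, k, v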
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)

    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    # load default config and find out whether this is a panoptic checkpoint
    config, is_panoptic = get_detr_config(model_name)

    # load original model from torch hub
    model_name_to_original_name = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load("facebookresearch/detr", model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = "detr." + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val

    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion on an image
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    processor = DetrImageProcessor(format=format)

    encoding = processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)

    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info("Uploading PyTorch model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='detr-resnet-50',
type=str,
choices=['detr-resnet-50', 'detr-resnet-101'],
help='Name of the DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub or not.')
    args = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
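# Example invocation (illustrative; the script filename and output path below
# are assumptions, not taken from this file):
#
#   python convert_detr_to_pytorch.py --model_name detr-resnet-50 \
#       --pytorch_dump_folder_path ./detr-resnet-50-converted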
| 29
|
def cocktail_shaker_sort(unsorted: list) -> list:
    """
    Pure implementation of the cocktail shaker sort algorithm in Python.
    """
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False

        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True

        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True

        if not swapped:
            break
    return unsorted
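# A quick self-check (a minimal sketch; the input values are illustrative).
# Cocktail shaker sort is a bidirectional bubble sort: each iteration bubbles
# the largest remaining element right, then the smallest remaining element left.
def _cocktail_shaker_sort_example():
    assert cocktail_shaker_sort([4, 5, 2, 1, 2]) == [1, 2, 2, 4, 5]
    assert cocktail_shaker_sort([-4, 0, 5, 1]) == [-4, 0, 1, 5]
    assert cocktail_shaker_sort([]) == []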
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(f"""{cocktail_shaker_sort(unsorted) = }""")
| 29
| 1
|
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """
    Find all the valid positions a knight can move to from the current position.
    """
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []

    for inner_position in positions:
        y_test, x_test = inner_position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(inner_position)

    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """
    Check if the board (matrix) has been completely filled with non-zero values.
    """
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """
    Helper function to solve the knight's tour problem by backtracking.
    """
    if is_complete(board):
        return True

    for position in get_valid_pos(pos, len(board)):
        y, x = position

        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0

    return False


def open_knight_tour(n: int) -> list[list[int]]:
    """
    Find a solution for the open knight's tour problem on an n x n board.
    Raises ValueError when no tour exists for the given size.
    """
    board = [[0 for i in range(n)] for j in range(n)]

    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0

    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)
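# Example usage (illustrative): 5x5 is a small board that admits an open
# knight's tour; a valid tour numbers every square with a distinct step 1..n*n.
def _open_knight_tour_example():
    n = 5
    board = open_knight_tour(n)
    assert sorted(cell for row in board for cell in row) == list(range(1, n * n + 1))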
if __name__ == "__main__":
import doctest
doctest.testmod()
| 29
|
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
def skip(test_case):
    "Decorator that skips a test unconditionally"
    return unittest.skip("Test was skipped")(test_case)


def slow(test_case):
    "Decorator marking a test as slow; skipped unless RUN_SLOW is set"
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)


def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)


def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)


def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)


def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)


def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite"
    )(test_case)


def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)


def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)


def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)


def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)


def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)


def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)


def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)


def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)


def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)


def require_torch_min_version(test_case=None, version=None):
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)


def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)


def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)


def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)


_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)


def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        "test requires at least one tracker to be available and for `comet_ml` to not be installed",
    )(test_case)
class TempDirTestCase(unittest.TestCase):
    """
    A TestCase class that keeps a single `tempfile.TemporaryDirectory` open for the duration of the class, wipes its
    data at the start of a test, and then destroys it at the end of the TestCase.
    """

    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)


class AccelerateTestCase(unittest.TestCase):
    """
    A TestCase class that resets the accelerator state at the end of every test.
    """

    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()


class MockingTestCase(unittest.TestCase):
    """
    A TestCase class designed to dynamically add mocks that should be used in every test.
    """

    def add_mocks(self, mocks: Union[mock.Mock, List[mock.Mock]]):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def are_the_same_tensors(tensor):
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    return result
class SubprocessCallException(Exception):
    pass


def run_command(command: List[str], return_stdout=False):
    """
    Runs `command` with `subprocess.check_output` and optionally returns its stdout. Properly captures and re-raises
    errors that occurred while running `command`.
    """
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e
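# Example usage (a minimal sketch; the command and function name are
# illustrative, not part of the library):
def _run_command_example():
    output = run_command([sys.executable, "-c", "print('ok')"], return_stdout=True)
    assert output.strip() == "ok"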
| 29
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_opt': ['OPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'OPTConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_opt'] = [
'OPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OPTForCausalLM',
'OPTModel',
'OPTPreTrainedModel',
'OPTForSequenceClassification',
'OPTForQuestionAnswering',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_opt'] = ['TFOPTForCausalLM', 'TFOPTModel', 'TFOPTPreTrainedModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_opt'] = [
'FlaxOPTForCausalLM',
'FlaxOPTModel',
'FlaxOPTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 29
|
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)


class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """BuilderConfig for AudioFolder."""

    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")


AUDIO_EXTENSIONS = [
'.aiff',
'.au',
'.avr',
'.caf',
'.flac',
'.htk',
'.svx',
'.mat4',
'.mat5',
'.mpc2k',
'.ogg',
'.paf',
'.pvf',
'.raw',
'.rf64',
'.sd2',
'.sds',
'.ircam',
'.voc',
'.w64',
'.wav',
'.nist',
'.wavex',
'.wve',
'.xi',
'.mp3',
'.opus',
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
| 29
| 1
|
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")
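# Example invocation (illustrative; the script filename and both paths are
# assumptions, not taken from this file):
#
#   python convert_xlm_original_pytorch_checkpoint_to_pytorch.py \
#       --xlm_checkpoint_path ./mlm_en_2048.pth \
#       --pytorch_dump_folder_path ./xlm-converted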
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xlm_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 29
|
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'nvidia/segformer-b0-finetuned-ade-512-512': (
'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
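# Example usage (a minimal sketch; `SegformerModel` lives in the modeling file,
# so the import below is illustrative rather than part of this config module):
#
#   from transformers import SegformerConfig, SegformerModel
#   config = SegformerConfig()  # defaults correspond to a SegFormer-B0 style model
#   model = SegformerModel(config)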
| 29
| 1
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
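# Example usage (a minimal sketch; the image path is hypothetical):
#
#   from PIL import Image
#   captioner = ImageCaptioningTool()
#   print(captioner(Image.open("./photo.png")))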
| 29
|
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    return b if a == 0 else greatest_common_divisor(b % a, a)


class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)

    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key: numpy.ndarray) -> None:
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)

        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not coprime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        chars = [char for char in text.upper() if char in self.key_string]

        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)

        return "".join(chars)

    def encrypt(self, text: str) -> str:
        text = self.process_text(text.upper())
        encrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[0]
            encrypted_batch = "".join(self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch

        return encrypted

    def make_decrypt_key(self) -> numpy.ndarray:
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break

        inv_key = det_inv * numpy.linalg.det(self.encrypt_key) * numpy.linalg.inv(self.encrypt_key)

        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch

        return decrypted


def main() -> None:
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []

    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)

    hc = HillCipher(numpy.array(hill_matrix))

    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))
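# A round-trip example (a minimal sketch; the key is illustrative but valid,
# since det([[2, 5], [1, 6]]) = 7 is coprime with 36):
def _hill_cipher_example():
    cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
    plaintext = cipher.process_text("testing hill cipher")
    assert cipher.decrypt(cipher.encrypt(plaintext)) == plaintext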
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 29
| 1
|
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"

stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class TestTheRest(TestCasePlus):
    def run_eval_tester(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        articles = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
        _dump_articles(input_file_name, articles)

        score_path = str(Path(self.get_auto_remove_tmp_dir()) / "scores.json")
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {input_file_name}
            {output_file_name}
            --score_path {score_path}
            --task {task}
            --num_beams 2
            --length_penalty 2.0
            """.split()

        with patch.object(sys, "argv", testargs):
            run_generate()
            assert Path(output_file_name).exists()
            # os.remove(Path(output_file_name))

    # test one model quickly to catch simple problems; extensive testing with
    # multiple models runs as @slow
    def test_run_eval(self):
        self.run_eval_tester(T5_TINY)

    @parameterized.expand([BART_TINY, MBART_TINY])
    @slow
    def test_run_eval_slow(self, model):
        self.run_eval_tester(model)

    # testing with 2 models to validate: 1. translation (t5) 2. summarization (mbart)
    @parameterized.expand([T5_TINY, MBART_TINY])
    @slow
    def test_run_eval_search(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()

        text = {
            "en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
            "de": [
                "Maschinelles Lernen ist großartig, oder?",
                "Ich esse gerne Bananen",
                "Morgen ist wieder ein toller Tag!",
            ],
        }

        tmp_dir = Path(self.get_auto_remove_tmp_dir())
        score_path = str(tmp_dir / "scores.json")
        reference_path = str(tmp_dir / "val.target")
        _dump_articles(input_file_name, text["en"])
        _dump_articles(reference_path, text["de"])
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {str(input_file_name)}
            {str(output_file_name)}
            --score_path {score_path}
            --reference_path {reference_path}
            --task {task}
            """.split()
        testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"])

        with patch.object(sys, "argv", testargs):
            with CaptureStdout() as cs:
                run_search()
            expected_strings = [" num_beams | length_penalty", model, "Best score args"]
            un_expected_strings = ["Info"]
            if "translation" in task:
                expected_strings.append("bleu")
            else:
                expected_strings.extend(ROUGE_KEYS)
            for w in expected_strings:
                assert w in cs.out
            for w in un_expected_strings:
                assert w not in cs.out
            assert Path(output_file_name).exists()
            os.remove(Path(output_file_name))
| 29
|
import qiskit
def half_adder(bit0: int, bit1: int) -> qiskit.result.counts.Counts:
    # Use Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")

    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()

    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)

    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()

    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value

    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)

    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)
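# Illustrative truth table (the counts key reads "<carry><sum>", since clbit 1
# holds the AND output and clbit 0 the XOR output; the circuit is
# deterministic, so all 1000 shots land on a single outcome):
#   half_adder(0, 0) -> {"00": 1000}
#   half_adder(0, 1) -> {"01": 1000}
#   half_adder(1, 0) -> {"01": 1000}
#   half_adder(1, 1) -> {"10": 1000}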
if __name__ == "__main__":
    counts = half_adder(1, 1)
print(f"""Half Adder Output Qubit Counts: {counts}""")
| 29
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_import_structure = {'tokenization_herbert': ['HerbertTokenizer']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_herbert_fast'] = ['HerbertTokenizerFast']
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 29
|
def partition(m: int) -> int:
    memo = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1

    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]
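# Illustrative values (the partition numbers, OEIS A000041): there are 7 ways
# to write 5 as a sum of positive integers and 42 ways to write 10.
def _partition_example():
    assert partition(5) == 7
    assert partition(10) == 42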
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
            n = int(input('Enter a number: ').strip())
print(partition(n))
except ValueError:
print('Please enter a number.')
else:
try:
            n = int(sys.argv[1])
print(partition(n))
except ValueError:
print('Please pass a number.')
| 29
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_time_series_transformer': [
'TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TimeSeriesTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_time_series_transformer'] = [
'TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TimeSeriesTransformerForPrediction',
'TimeSeriesTransformerModel',
'TimeSeriesTransformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 29
|
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
UpperCAmelCase_ = logging.get_logger(__name__)
@add_end_docstrings(__lowerCamelCase)
class lowerCamelCase__( __lowerCamelCase):
def __init__( self: Tuple , *UpperCamelCase_: Dict , **UpperCamelCase_: Optional[int] ):
super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
requires_backends(self , """decord""" )
self.check_model_type(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: int=None , UpperCamelCase_: List[Any]=None , UpperCamelCase_: Optional[int]=None ):
__lowerCamelCase = {}
if frame_sampling_rate is not None:
__lowerCamelCase = frame_sampling_rate
if num_frames is not None:
__lowerCamelCase = num_frames
__lowerCamelCase = {}
if top_k is not None:
__lowerCamelCase = top_k
return preprocess_params, {}, postprocess_params
def __call__( self: Any , UpperCamelCase_: Union[str, List[str]] , **UpperCamelCase_: str ):
return super().__call__(UpperCamelCase_ , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: List[str]=None , UpperCamelCase_: List[Any]=1 ):
if num_frames is None:
__lowerCamelCase = self.model.config.num_frames
if video.startswith("""http://""" ) or video.startswith("""https://""" ):
__lowerCamelCase = BytesIO(requests.get(UpperCamelCase_ ).content )
__lowerCamelCase = VideoReader(UpperCamelCase_ )
videoreader.seek(0 )
__lowerCamelCase = 0
__lowerCamelCase = num_frames * frame_sampling_rate - 1
        __lowerCamelCase = np.linspace(UpperCamelCase_ , UpperCamelCase_ , num=UpperCamelCase_ , dtype=np.int64 )
__lowerCamelCase = videoreader.get_batch(UpperCamelCase_ ).asnumpy()
__lowerCamelCase = list(UpperCamelCase_ )
__lowerCamelCase = self.image_processor(UpperCamelCase_ , return_tensors=self.framework )
return model_inputs
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: Any ):
__lowerCamelCase = self.model(**UpperCamelCase_ )
return model_outputs
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: List[Any] , UpperCamelCase_: Optional[int]=5 ):
if top_k > self.model.config.num_labels:
__lowerCamelCase = self.model.config.num_labels
if self.framework == "pt":
__lowerCamelCase = model_outputs.logits.softmax(-1 )[0]
__lowerCamelCase, __lowerCamelCase = probs.topk(UpperCamelCase_ )
else:
raise ValueError(F'Unsupported framework: {self.framework}' )
__lowerCamelCase = scores.tolist()
__lowerCamelCase = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(UpperCamelCase_ , UpperCamelCase_ )]
| 29
| 1
|
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class lowerCamelCase__:
def __init__( self: str , UpperCamelCase_: List[Any] , UpperCamelCase_: Dict=13 , UpperCamelCase_: Tuple=7 , UpperCamelCase_: Tuple=True , UpperCamelCase_: List[str]=True , UpperCamelCase_: Dict=False , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: Union[str, Any]=99 , UpperCamelCase_: Dict=32 , UpperCamelCase_: Union[str, Any]=5 , UpperCamelCase_: int=4 , UpperCamelCase_: Dict=37 , UpperCamelCase_: Optional[int]="gelu" , UpperCamelCase_: List[Any]=0.1 , UpperCamelCase_: Union[str, Any]=0.1 , UpperCamelCase_: Any=5_12 , UpperCamelCase_: Union[str, Any]=16 , UpperCamelCase_: Dict=2 , UpperCamelCase_: Tuple=0.02 , UpperCamelCase_: List[Any]=3 , UpperCamelCase_: List[Any]=4 , UpperCamelCase_: Any=None , ):
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_input_mask
__lowerCamelCase = use_token_type_ids
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = type_sequence_label_size
__lowerCamelCase = initializer_range
__lowerCamelCase = num_labels
__lowerCamelCase = num_choices
__lowerCamelCase = scope
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase = None
if self.use_input_mask:
__lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCamelCase = None
if self.use_token_type_ids:
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = None
if self.use_labels:
__lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices )
__lowerCamelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase__ ( self: Optional[int] ):
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , use_stable_embedding=UpperCamelCase_ , )
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: Dict , UpperCamelCase_: Any , UpperCamelCase_: int , UpperCamelCase_: List[str] , UpperCamelCase_: str , UpperCamelCase_: List[str] , UpperCamelCase_: Any ):
__lowerCamelCase = OpenLlamaModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__lowerCamelCase = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ )
__lowerCamelCase = model(UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: Any , UpperCamelCase_: Optional[int] , UpperCamelCase_: List[str] , UpperCamelCase_: Tuple , UpperCamelCase_: Tuple , UpperCamelCase_: str , UpperCamelCase_: Any , UpperCamelCase_: int , UpperCamelCase_: str , ):
__lowerCamelCase = True
__lowerCamelCase = OpenLlamaModel(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__lowerCamelCase = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , )
__lowerCamelCase = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , )
__lowerCamelCase = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Any , UpperCamelCase_: Optional[int] , UpperCamelCase_: int , UpperCamelCase_: Optional[int] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Optional[int] , UpperCamelCase_: str , UpperCamelCase_: Dict , ):
__lowerCamelCase = OpenLlamaForCausalLM(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__lowerCamelCase = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: List[str] , UpperCamelCase_: Optional[int] , UpperCamelCase_: List[Any] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: int , UpperCamelCase_: Optional[Any] , UpperCamelCase_: List[Any] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Any , ):
__lowerCamelCase = True
__lowerCamelCase = True
__lowerCamelCase = OpenLlamaForCausalLM(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
# first forward pass
__lowerCamelCase = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , use_cache=UpperCamelCase_ , )
__lowerCamelCase = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__lowerCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
__lowerCamelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
__lowerCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
__lowerCamelCase = torch.cat([input_mask, next_mask] , dim=-1 )
__lowerCamelCase = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , output_hidden_states=UpperCamelCase_ , )["""hidden_states"""][0]
__lowerCamelCase = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ , output_hidden_states=UpperCamelCase_ , )["""hidden_states"""][0]
# select random slice
__lowerCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__lowerCamelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
__lowerCamelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-3 ) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : int = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
UpperCAmelCase__ : Dict = (OpenLlamaForCausalLM,) if is_torch_available() else ()
UpperCAmelCase__ : int = (
{
'feature-extraction': OpenLlamaModel,
'text-classification': OpenLlamaForSequenceClassification,
'text-generation': OpenLlamaForCausalLM,
'zero-shot': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : Optional[Any] = False
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = OpenLlamaModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=UpperCamelCase_ , hidden_size=37 )
def lowerCAmelCase__ ( self: Optional[int] ):
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__lowerCamelCase = type
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase, __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase = 3
__lowerCamelCase = input_dict["""input_ids"""]
__lowerCamelCase = input_ids.ne(1 ).to(UpperCamelCase_ )
__lowerCamelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__lowerCamelCase = OpenLlamaForSequenceClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__lowerCamelCase = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase, __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase = 3
__lowerCamelCase = """single_label_classification"""
__lowerCamelCase = input_dict["""input_ids"""]
__lowerCamelCase = input_ids.ne(1 ).to(UpperCamelCase_ )
__lowerCamelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__lowerCamelCase = OpenLlamaForSequenceClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__lowerCamelCase = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase, __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase = 3
__lowerCamelCase = """multi_label_classification"""
__lowerCamelCase = input_dict["""input_ids"""]
__lowerCamelCase = input_ids.ne(1 ).to(UpperCamelCase_ )
__lowerCamelCase = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
__lowerCamelCase = OpenLlamaForSequenceClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__lowerCamelCase = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("""Open-Llama buffers include complex numbers, which breaks this test""" )
def lowerCAmelCase__ ( self: Dict ):
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: str ):
__lowerCamelCase, __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase = ids_tensor([1, 10] , config.vocab_size )
__lowerCamelCase = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__lowerCamelCase = OpenLlamaModel(UpperCamelCase_ )
original_model.to(UpperCamelCase_ )
original_model.eval()
__lowerCamelCase = original_model(UpperCamelCase_ ).last_hidden_state
__lowerCamelCase = original_model(UpperCamelCase_ ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__lowerCamelCase = {"""type""": scaling_type, """factor""": 10.0}
__lowerCamelCase = OpenLlamaModel(UpperCamelCase_ )
scaled_model.to(UpperCamelCase_ )
scaled_model.eval()
__lowerCamelCase = scaled_model(UpperCamelCase_ ).last_hidden_state
__lowerCamelCase = scaled_model(UpperCamelCase_ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-5 ) )
| 29
|
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
UpperCAmelCase_ = logging.get_logger(__name__)
@add_end_docstrings(__lowerCamelCase)
class lowerCamelCase__( __lowerCamelCase):
def __init__( self: List[Any] , *UpperCamelCase_: Dict , **UpperCamelCase_: Dict ):
super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
self.check_model_type(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: str=None , UpperCamelCase_: Optional[int]=None , UpperCamelCase_: Optional[int]=None , **UpperCamelCase_: List[Any] ):
__lowerCamelCase, __lowerCamelCase = {}, {}
if padding is not None:
__lowerCamelCase = padding
if truncation is not None:
__lowerCamelCase = truncation
if top_k is not None:
__lowerCamelCase = top_k
return preprocess_params, {}, postprocess_params
def __call__( self: Optional[Any] , UpperCamelCase_: Union["Image.Image", str] , UpperCamelCase_: str = None , **UpperCamelCase_: List[str] ):
if isinstance(UpperCamelCase_ , (Image.Image, str) ) and isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = {"""image""": image, """question""": question}
else:
__lowerCamelCase = image
__lowerCamelCase = super().__call__(UpperCamelCase_ , **UpperCamelCase_ )
return results
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: str , UpperCamelCase_: Any=False , UpperCamelCase_: Optional[int]=False ):
__lowerCamelCase = load_image(inputs["""image"""] )
__lowerCamelCase = self.tokenizer(
inputs["""question"""] , return_tensors=self.framework , padding=UpperCamelCase_ , truncation=UpperCamelCase_ )
__lowerCamelCase = self.image_processor(images=UpperCamelCase_ , return_tensors=self.framework )
model_inputs.update(UpperCamelCase_ )
return model_inputs
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: Tuple ):
__lowerCamelCase = self.model(**UpperCamelCase_ )
return model_outputs
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: Optional[int] , UpperCamelCase_: Optional[Any]=5 ):
if top_k > self.model.config.num_labels:
__lowerCamelCase = self.model.config.num_labels
if self.framework == "pt":
__lowerCamelCase = model_outputs.logits.sigmoid()[0]
__lowerCamelCase, __lowerCamelCase = probs.topk(UpperCamelCase_ )
else:
raise ValueError(F'Unsupported framework: {self.framework}' )
__lowerCamelCase = scores.tolist()
__lowerCamelCase = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(UpperCamelCase_ , UpperCamelCase_ )]
| 29
| 1
|
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow:
    def __init__(self, device="cpu", processor_name="openai/clip-vit-large-patch14"):
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(processor_name)
        self.image_mean = [0.48145466, 0.4578275, 0.40821073]
        self.image_std = [0.26862954, 0.26130258, 0.27577711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std)
        self.resize = torchvision.transforms.Resize(224)
        self.center_crop = torchvision.transforms.CenterCrop(224)

    def preprocess_img(self, images):
        images = self.resize(images)
        images = self.center_crop(images)
        images = self.normalize(images)
        return images

    def __call__(self, text=None, images=None, **kwargs):
        encoding = self.tokenizer(text=text, **kwargs)
        encoding["pixel_values"] = self.preprocess_img(images)
        encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding
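# A sketch of the intended call pattern (the processor mirrors CLIPProcessor's
# interface, but its resize/crop/normalize path stays differentiable w.r.t. the
# image tensor, which is what the CLIP-loss optimization below relies on):
#
#   processor = ProcessorGradientFlow(device="cuda")
#   encoding = processor(text=["a smiling face"], images=img_tensor, return_tensors="pt", padding=True)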
class VQGAN_CLIP(nn.Module):
    def __init__(
        self,
        iterations=10,
        lr=0.01,
        vqgan=None,
        vqgan_config=None,
        vqgan_checkpoint=None,
        clip=None,
        clip_preprocessor=None,
        device=None,
        log=False,
        save_vector=True,
        return_val="image",
        quantize=True,
        save_intermediate=False,
        show_intermediate=False,
        make_grid=False,
    ):
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device, conf_path=vqgan_config, ckpt_path=vqgan_checkpoint)
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        self.clip.to(self.device)
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device)

        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        self.latent_dim = self.vqgan.decoder.z_shape
    def make_animation(self, input_path=None, output_path=None, total_duration=5, extend_frames=True):
        images = []
        if output_path is None:
            output_path = "./animation.gif"
        if input_path is None:
            input_path = self.save_path
        paths = sorted(glob(input_path + "/*"))
        if not len(paths):
            raise ValueError(
                "No images found in save path, aborting (did you pass save_intermediate=True to the generate"
                " function?)"
            )
        if len(paths) == 1:
            print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)")
        frame_duration = total_duration / len(paths)
        durations = [frame_duration] * len(paths)
        if extend_frames:
            # hold the first and last frame a little longer
            durations[0] = 1.5
            durations[-1] = 3
        for file_name in paths:
            if file_name.endswith(".png"):
                images.append(imageio.imread(file_name))
        imageio.mimsave(output_path, images, duration=durations)
        print(f"gif saved to {output_path}")
    def _get_latent(self, path=None, img=None):
        if not (path or img):
            raise ValueError("Input either path or tensor")
        if img is not None:
            raise NotImplementedError
        img = preprocess(Image.open(path), target_image_size=256).to(self.device)
        img = preprocess_vqgan(img)
        z, *_ = self.vqgan.encode(img)
        return z

    def _add_vector(self, transform_vector):
        base_latent = self.latent.detach().requires_grad_()
        trans_latent = base_latent + transform_vector
        if self.quantize:
            z_q, *_ = self.vqgan.quantize(trans_latent)
        else:
            z_q = trans_latent
        return self.vqgan.decode(z_q)
    def _get_clip_similarity(self, prompts, image, weights=None):
        clip_inputs = self.clip_preprocessor(text=prompts, images=image, return_tensors="pt", padding=True)
        clip_outputs = self.clip(**clip_inputs)
        similarity_logits = clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits = similarity_logits * weights
        return similarity_logits.sum()

    def _get_CLIP_loss(self, pos_prompts, neg_prompts, image):
        pos_logits = self._get_clip_similarity(pos_prompts["prompts"], image, weights=(1 / pos_prompts["weights"]))
        if neg_prompts:
            neg_logits = self._get_clip_similarity(neg_prompts["prompts"], image, weights=neg_prompts["weights"])
        else:
            neg_logits = torch.tensor([1], device=self.device)
        loss = -torch.log(pos_logits) + torch.log(neg_logits)
        return loss
    def _optimize_CLIP(self, original_img, pos_prompts, neg_prompts):
        vector = torch.randn_like(self.latent, requires_grad=True, device=self.device)
        optim = torch.optim.Adam([vector], lr=self.lr)

        for i in range(self.iterations):
            optim.zero_grad()
            transformed_img = self._add_vector(vector)
            processed_img = loop_post_process(transformed_img)
            clip_loss = self._get_CLIP_loss(pos_prompts, neg_prompts, processed_img)
            print("CLIP loss", clip_loss)
            if self.log:
                wandb.log({"CLIP Loss": clip_loss})
            clip_loss.backward(retain_graph=True)
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0])
            else:
                yield vector
    def _init_logging(self, positive_prompts, negative_prompts, image_path):
        wandb.init(reinit=True, project="face-editor")
        wandb.config.update({"Positive Prompts": positive_prompts})
        wandb.config.update({"Negative Prompts": negative_prompts})
        wandb.config.update({"lr": self.lr, "iterations": self.iterations})
        if image_path:
            image = Image.open(image_path)
            image = image.resize((256, 256))
            wandb.log("Original Image", wandb.Image(image))
    def process_prompts(self, prompts):
        if not prompts:
            return []
        processed_prompts = []
        weights = []
        if isinstance(prompts, str):
            prompts = [prompt.strip() for prompt in prompts.split("|")]
        for prompt in prompts:
            if isinstance(prompt, (tuple, list)):
                processed_prompt = prompt[0]
                weight = float(prompt[1])
            elif ":" in prompt:
                processed_prompt, weight = prompt.split(":")
                weight = float(weight)
            else:
                processed_prompt = prompt
                weight = 1.0
            processed_prompts.append(processed_prompt)
            weights.append(weight)
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(weights, device=self.device),
        }
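    # Worked example (traced by hand): process_prompts("a sunny beach:1.5|storm clouds:0.5")
    # returns {"prompts": ["a sunny beach", "storm clouds"], "weights": tensor([1.5, 0.5])};
    # prompts without an explicit ":<weight>" suffix default to a weight of 1.0.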
    def generate(
        self,
        pos_prompts,
        neg_prompts=None,
        image_path=None,
        show_intermediate=True,
        save_intermediate=False,
        show_final=True,
        save_final=True,
        save_path=None,
    ):
        if image_path:
            self.latent = self._get_latent(image_path)
        else:
            self.latent = torch.randn(self.latent_dim, device=self.device)
        if self.log:
            self._init_logging(pos_prompts, neg_prompts, image_path)

        assert pos_prompts, "You must provide at least one positive prompt."
        pos_prompts = self.process_prompts(pos_prompts)
        neg_prompts = self.process_prompts(neg_prompts)
        if save_final and save_path is None:
            save_path = os.path.join("./outputs/", "_".join(pos_prompts["prompts"]))
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            else:
                save_path = save_path + "_" + get_timestamp()
                os.makedirs(save_path)
        self.save_path = save_path

        original_img = self.vqgan.decode(self.latent)[0]
        if show_intermediate:
            print("Original Image")
            show_pil(custom_to_pil(original_img))

        original_img = loop_post_process(original_img)
        for iter, transformed_img in enumerate(self._optimize_CLIP(original_img, pos_prompts, neg_prompts)):
            if show_intermediate:
                show_pil(transformed_img)
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png"))
            if self.log:
                wandb.log({"Image": wandb.Image(transformed_img)})
        if show_final:
            show_pil(transformed_img)
        if save_final:
            transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png"))
edges: dict[str, list[str]] = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices: list[str] = ["a", "b", "c", "d", "e"]


def topological_sort(start: str, visited: list[str], sort: list[str]) -> list[str]:
    """Perform a depth-first topological sort on the directed acyclic graph `edges`."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort


if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort)
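# Hand-traced expected output for the edge list above: the recursion appends leaf
# nodes first, so the program prints ['c', 'd', 'e', 'b', 'a'].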
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split='train')
print(f"""Dataset loaded in {time.time()-t_start:.2f}s""")

t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'repo_name',
'path',
'copies',
'size',
'content',
'license',
'hash',
'line_mean',
'line_max',
'alpha_frac',
'autogenerated',
],
)
print(f"""Dataset tokenized in {time.time()-t_start:.2f}s""")
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"""Data pushed to the hub in {time.time()-t_start:.2f}s""")
import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """Return a dict of current worldwide COVID-19 statistics scraped from worldometers."""
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
        print(f"{key}\n{value}\n")
from __future__ import annotations


def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    """
    Solve the fractional knapsack problem greedily, by descending value/weight ratio.

    >>> value = [1, 3, 5, 7, 9]
    >>> weight = [0.9, 0.7, 0.5, 0.3, 0.1]
    >>> fractional_knapsack(value, weight, 5)
    (25, [1, 1, 1, 1, 1])
    """
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}


class YolosConfig(PretrainedConfig):
    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
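# Round-trip sketch (standard PretrainedConfig behavior; the directory name is
# arbitrary):
#
#   config = YolosConfig(num_detection_tokens=100, image_size=[512, 864])
#   config.save_pretrained("./yolos-config")
#   config = YolosConfig.from_pretrained("./yolos-config")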
from __future__ import annotations


def encode(plain: str) -> list[int]:
    """
    Encode a lowercase string into a list of alphabet positions.

    >>> encode("hello")
    [8, 5, 12, 12, 15]
    """
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    """
    Decode a list of alphabet positions back into a string.

    >>> decode([8, 5, 12, 12, 15])
    'hello'
    """
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))


if __name__ == "__main__":
    main()
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    """Return the 1-based line number of the base,exponent pair with the greatest value."""
    largest = 0.0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        base, exponent = list(map(int, line.split(",")))
        if exponent * log10(base) > largest:
            largest = exponent * log10(base)
            result = i + 1
    return result
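# Why logarithms work here: for positive bases, base**exp grows monotonically with
# exp * log10(base), so the comparison never has to materialize the huge powers.
# Small check: 2**11 = 2048 vs 3**7 = 2187, and indeed 11*log10(2) ~ 3.311 is less
# than 7*log10(3) ~ 3.340, correctly picking 3**7 as larger.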
if __name__ == "__main__":
print(solution())
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class lowerCamelCase__:
def __init__( self: Tuple , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: List[str]=13 , UpperCamelCase_: Any=7 , UpperCamelCase_: Optional[int]=6 , UpperCamelCase_: Any=17 , UpperCamelCase_: List[Any]=23 , UpperCamelCase_: Optional[Any]=11 , UpperCamelCase_: Optional[Any]=True , ):
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = act_dim
__lowerCamelCase = state_dim
__lowerCamelCase = hidden_size
__lowerCamelCase = max_length
__lowerCamelCase = is_training
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
__lowerCamelCase = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
__lowerCamelCase = floats_tensor((self.batch_size, self.seq_length, 1) )
__lowerCamelCase = floats_tensor((self.batch_size, self.seq_length, 1) )
__lowerCamelCase = ids_tensor((self.batch_size, self.seq_length) , vocab_size=10_00 )
__lowerCamelCase = random_attention_mask((self.batch_size, self.seq_length) )
__lowerCamelCase = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def lowerCAmelCase__ ( self: Union[str, Any] ):
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
def lowerCAmelCase__ ( self: int , UpperCamelCase_: int , UpperCamelCase_: Dict , UpperCamelCase_: Dict , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Optional[int] , UpperCamelCase_: List[str] , UpperCamelCase_: Any , ):
__lowerCamelCase = DecisionTransformerModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__lowerCamelCase = model(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) )  # seq length * 3 because there are 3 modalities: states, returns and actions
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = self.prepare_config_and_inputs()
(
(
__lowerCamelCase
), (
__lowerCamelCase
), (
__lowerCamelCase
), (
__lowerCamelCase
), (
__lowerCamelCase
), (
__lowerCamelCase
), (
__lowerCamelCase
),
) = config_and_inputs
__lowerCamelCase = {
"""states""": states,
"""actions""": actions,
"""rewards""": rewards,
"""returns_to_go""": returns_to_go,
"""timesteps""": timesteps,
"""attention_mask""": attention_mask,
}
return config, inputs_dict
@require_torch
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : List[Any] = (DecisionTransformerModel,) if is_torch_available() else ()
UpperCAmelCase__ : Dict = ()
UpperCAmelCase__ : Union[str, Any] = {'feature-extraction': DecisionTransformerModel} if is_torch_available() else {}
# Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
UpperCAmelCase__ : Dict = False
# Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
UpperCAmelCase__ : Dict = False
UpperCAmelCase__ : str = False
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : Any = False
UpperCAmelCase__ : str = False
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : Dict = False
UpperCAmelCase__ : Any = False
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = DecisionTransformerModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=UpperCamelCase_ , hidden_size=37 )
def lowerCAmelCase__ ( self: List[Any] ):
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
@slow
def lowerCAmelCase__ ( self: Union[str, Any] ):
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase = DecisionTransformerModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase, __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(UpperCamelCase_ )
__lowerCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase = [*signature.parameters.keys()]
__lowerCamelCase = [
"""states""",
"""actions""",
"""rewards""",
"""returns_to_go""",
"""timesteps""",
"""attention_mask""",
]
self.assertListEqual(arg_names[: len(UpperCamelCase_ )] , UpperCamelCase_ )
@require_torch
class lowerCamelCase__( unittest.TestCase):
@slow
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = 2 # number of steps of autoregressive prediction we will perform
__lowerCamelCase = 10 # defined by the RL environment, may be normalized
__lowerCamelCase = DecisionTransformerModel.from_pretrained("""edbeeching/decision-transformer-gym-hopper-expert""" )
__lowerCamelCase = model.to(UpperCamelCase_ )
__lowerCamelCase = model.config
torch.manual_seed(0 )
__lowerCamelCase = torch.randn(1 , 1 , config.state_dim ).to(device=UpperCamelCase_ , dtype=torch.floataa ) # env.reset()
__lowerCamelCase = torch.tensor(
[[0.24_2793, -0.2869_3074, 0.874_2613], [0.6781_5274, -0.0810_1085, -0.1295_2147]] , device=UpperCamelCase_ )
__lowerCamelCase = torch.tensor(UpperCamelCase_ , device=UpperCamelCase_ , dtype=torch.floataa ).reshape(1 , 1 , 1 )
__lowerCamelCase = state
__lowerCamelCase = torch.zeros(1 , 0 , config.act_dim , device=UpperCamelCase_ , dtype=torch.floataa )
__lowerCamelCase = torch.zeros(1 , 0 , device=UpperCamelCase_ , dtype=torch.floataa )
__lowerCamelCase = torch.tensor(0 , device=UpperCamelCase_ , dtype=torch.long ).reshape(1 , 1 )
for step in range(UpperCamelCase_ ):
__lowerCamelCase = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=UpperCamelCase_ )] , dim=1 )
__lowerCamelCase = torch.cat([rewards, torch.zeros(1 , 1 , device=UpperCamelCase_ )] , dim=1 )
__lowerCamelCase = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
with torch.no_grad():
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase = model(
states=UpperCamelCase_ , actions=UpperCamelCase_ , rewards=UpperCamelCase_ , returns_to_go=UpperCamelCase_ , timesteps=UpperCamelCase_ , attention_mask=UpperCamelCase_ , return_dict=UpperCamelCase_ , )
self.assertEqual(action_pred.shape , actions.shape )
self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1E-4 ) )
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = ( # env.step(action)
torch.randn(1 , 1 , config.state_dim ).to(device=UpperCamelCase_ , dtype=torch.floataa ),
1.0,
False,
{},
)
__lowerCamelCase = action_pred[0, -1]
__lowerCamelCase = torch.cat([states, state] , dim=1 )
__lowerCamelCase = returns_to_go[0, -1] - reward
__lowerCamelCase = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
__lowerCamelCase = torch.cat(
[timesteps, torch.ones((1, 1) , device=UpperCamelCase_ , dtype=torch.long ) * (step + 1)] , dim=1 )
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """
    Create a beta schedule that discretizes the given alpha_bar function, which defines
    the cumulative product of (1 - beta) over time from t = [0, 1].
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
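# For the cosine option above, the cumulative product of (1 - beta) follows
# alpha_bar(t) = cos^2(((t + 0.008) / 1.008) * pi / 2), so each discrete step uses
# beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), clipped at max_beta.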
class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()
@property
    def init_noise_sigma(self):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
    def scale_model_input(
        self,
        sample: torch.FloatTensor,
        timestep: Union[float, torch.FloatTensor],
    ) -> torch.FloatTensor:
        step_index = self.index_for_timestep(timestep)

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
        else:
            sigma = self.sigmas_interpol[step_index]

        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(
        self,
        num_inference_steps: int,
        device: Union[str, torch.device] = None,
        num_train_timesteps: Optional[int] = None,
    ):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(np.float32)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        self.log_sigmas = torch.from_numpy(np.log(sigmas)).to(device)

        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)

        # interpolate sigmas
        sigmas_interpol = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp()

        self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
        self.sigmas_interpol = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]]
        )

        if str(device).startswith("mps"):
            # mps does not support float64
            timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32)
        else:
            timesteps = torch.from_numpy(timesteps).to(device)

        # interpolate timesteps
        timesteps_interpol = self.sigma_to_t(sigmas_interpol).to(device, dtype=timesteps.dtype)
        interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1).flatten()
        self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps])

        self.sample = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def sigma_to_t(self, sigma):
        # get log sigma
        log_sigma = sigma.log()

        # get distribution
        dists = log_sigma - self.log_sigmas[:, None]

        # get sigmas range
        low_idx = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = self.log_sigmas[low_idx]
        high = self.log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = w.clamp(0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.view(sigma.shape)
        return t
@property
    def state_in_first_order(self):
return self.sample is None
    def step(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray],
        return_dict: bool = True,
    ):
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_interpol = self.sigmas_interpol[step_index + 1]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            sigma = self.sigmas[step_index - 1]
            sigma_interpol = self.sigmas_interpol[step_index]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("prediction_type not implemented yet: sample")
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_interpol - sigma_hat

            # store for 2nd order step
            self.sample = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            derivative = (sample - pred_original_sample) / sigma_interpol
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            sample = self.sample
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples
def __len__( self: Tuple ):
return self.config.num_train_timesteps
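# A minimal driving sketch (shapes are illustrative, and `unet` is a stand-in for
# any denoising model -- not a claim about a specific pipeline):
#
#   scheduler = KDPM2DiscreteScheduler()
#   scheduler.set_timesteps(num_inference_steps=25)
#   sample = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       noise_pred = unet(model_input, t)
#       sample = scheduler.step(noise_pred, t, sample).prev_sample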
from collections.abc import Callable

import numpy as np


def explicit_euler(ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float) -> np.ndarray:
    """
    Solve an ODE dy/dx = ode_func(x, y) with the forward (explicit) Euler method.

    >>> # dy/dx = 0 keeps y constant, whatever the step size
    >>> y = explicit_euler(lambda x, y: 0.0, 1.0, 0.0, 0.1, 1.0)
    >>> float(y[-1])
    1.0
    """
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size

    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
import random
import unittest
import torch
from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
from __future__ import annotations

graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breadth_first_search(self) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breadth_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))
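# Hand-traced for the adjacency list above: the first print gives "G->C->A->B->D",
# the second gives "G", and the third raises ValueError because "Foo" is not in the
# breadth-first tree rooted at "G".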
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph):
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True


# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
from ..utils import DummyObject, requires_backends


# NOTE: the original class names did not survive the identifier-mangling that
# produced this dump; the placeholder names below only preserve the dummy-object
# mechanics of transformers' speech dummies.
class SpeechFeatureExtractorDummy(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])


class SpeechProcessorDummy(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter

        scheduler.set_timesteps(num_inference_steps)
        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample

        return sample
    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5
    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3
    def test_full_loop_no_noise(self):
        sample = self.full_loop()

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
from math import ceil, sqrt


def solution(limit: int = 1_000_000) -> int:
    """Count the square laminae that can be formed using up to `limit` tiles."""
    answer = 0

    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1

        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1

    return answer
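# Tile-count sanity check: a lamina of outer width n and hole width m (same parity)
# uses n**2 - m**2 tiles; the smallest case, outer width 3 with a 1x1 hole, uses
# 3**2 - 1**2 = 8 tiles. solution() counts every (outer, hole) pair within `limit`.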
if __name__ == "__main__":
print(f"""{solution() = }""")
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
'_TestCommandArgs',
[
'dataset',
'name',
'cache_dir',
'data_dir',
'all_configs',
'save_infos',
'ignore_verifications',
'force_redownload',
'clear_cache',
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_1percent_close(source, target):
    """Return True if `source` is within 1% of `target`."""
    return (abs(source - target) / target) < 0.01
@pytest.mark.integration
def test_test_command(dataset_dir):
    args = _TestCommandArgs(dataset=dataset_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    datasets_readme_path = os.path.join(dataset_dir, "README.md")
    assert os.path.exists(datasets_readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_dir)
    expected_dataset_infos = DatasetInfosDict(
{
"""default""": DatasetInfo(
features=Features(
{
"""tokens""": Sequence(Value("""string""" ) ),
"""ner_tags""": Sequence(
ClassLabel(names=["""O""", """B-PER""", """I-PER""", """B-ORG""", """I-ORG""", """B-LOC""", """I-LOC"""] ) ),
"""langs""": Sequence(Value("""string""" ) ),
"""spans""": Sequence(Value("""string""" ) ),
} ) , splits=[
{
"""name""": """train""",
"""num_bytes""": 2351563,
"""num_examples""": 10000,
},
{
"""name""": """validation""",
"""num_bytes""": 238418,
"""num_examples""": 1000,
},
] , download_size=3940680 , dataset_size=2589981 , )
} )
    assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
        if key == "num_bytes":
            assert is_1percent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_1percent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            assert result == expected
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class lowerCamelCase__:
def __init__( self: Optional[Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Optional[int]=2 , UpperCamelCase_: str=32 , UpperCamelCase_: Dict=16 , UpperCamelCase_: Optional[Any]=3 , UpperCamelCase_: Optional[int]=True , UpperCamelCase_: List[str]=True , UpperCamelCase_: Optional[Any]=32 , UpperCamelCase_: int=4 , UpperCamelCase_: Optional[int]=[0, 1, 2, 3] , UpperCamelCase_: Union[str, Any]=4 , UpperCamelCase_: Optional[int]=37 , UpperCamelCase_: Optional[Any]="gelu" , UpperCamelCase_: Tuple=0.1 , UpperCamelCase_: Optional[int]=0.1 , UpperCamelCase_: List[str]=0.02 , UpperCamelCase_: Any=3 , UpperCamelCase_: Tuple=[1, 3_84, 24, 24] , UpperCamelCase_: Any=True , UpperCamelCase_: List[Any]=None , ):
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = image_size
__lowerCamelCase = patch_size
__lowerCamelCase = num_channels
__lowerCamelCase = is_training
__lowerCamelCase = use_labels
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = backbone_out_indices
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = initializer_range
__lowerCamelCase = num_labels
__lowerCamelCase = backbone_featmap_shape
__lowerCamelCase = scope
__lowerCamelCase = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
__lowerCamelCase = (image_size // patch_size) ** 2
__lowerCamelCase = num_patches + 1
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCamelCase = None
if self.use_labels:
__lowerCamelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__lowerCamelCase = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase__ ( self: int ):
__lowerCamelCase = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
"""hidden_sizes""": [96, 1_92, 3_84, 7_68],
"""num_groups""": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=UpperCamelCase_ , backbone_featmap_shape=self.backbone_featmap_shape , )
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: List[str] , UpperCamelCase_: List[Any] , UpperCamelCase_: str ):
__lowerCamelCase = DPTModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__lowerCamelCase = model(UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: Tuple , UpperCamelCase_: Any , UpperCamelCase_: Tuple ):
__lowerCamelCase = self.num_labels
__lowerCamelCase = DPTForDepthEstimation(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__lowerCamelCase = model(UpperCamelCase_ )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def lowerCAmelCase__ ( self: Any , UpperCamelCase_: List[str] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: List[str] ):
__lowerCamelCase = self.num_labels
__lowerCamelCase = DPTForSemanticSegmentation(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__lowerCamelCase = model(UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = self.prepare_config_and_inputs()
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase = config_and_inputs
__lowerCamelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : List[str] = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
UpperCAmelCase__ : Dict = (
{
'depth-estimation': DPTForDepthEstimation,
'feature-extraction': DPTModel,
'image-segmentation': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : str = False
UpperCAmelCase__ : List[Any] = False
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = DPTModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ , hidden_size=37 )
def lowerCAmelCase__ ( self: List[Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""DPT does not use inputs_embeds""" )
def lowerCAmelCase__ ( self: List[Any] ):
pass
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase, __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(UpperCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__lowerCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase_ , nn.Linear ) )
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase, __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(UpperCamelCase_ )
__lowerCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase = [*signature.parameters.keys()]
__lowerCamelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*UpperCamelCase_ )
def lowerCAmelCase__ ( self: int ):
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*UpperCamelCase_ )
def lowerCAmelCase__ ( self: str ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__lowerCamelCase, __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase = True
if model_class in get_values(UpperCamelCase_ ):
continue
__lowerCamelCase = model_class(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.train()
__lowerCamelCase = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ , return_labels=UpperCamelCase_ )
__lowerCamelCase = model(**UpperCamelCase_ ).loss
loss.backward()
def lowerCAmelCase__ ( self: Optional[Any] ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__lowerCamelCase, __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase = False
__lowerCamelCase = True
if model_class in get_values(UpperCamelCase_ ) or not model_class.supports_gradient_checkpointing:
continue
__lowerCamelCase = model_class(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.gradient_checkpointing_enable()
model.train()
__lowerCamelCase = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ , return_labels=UpperCamelCase_ )
__lowerCamelCase = model(**UpperCamelCase_ ).loss
loss.backward()
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase, __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase = _config_zero_init(UpperCamelCase_ )
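        # with a zeroed initializer_range, every properly initialized weight should have mean 0.0 (or 1.0 for norm weights)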
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(config=UpperCamelCase_ )
# Skip the check for the backbone
__lowerCamelCase = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
__lowerCamelCase = [F'{name}.{key}' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCAmelCase__ ( self: Optional[int] ):
pass
@slow
def lowerCAmelCase__ ( self: str ):
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
__lowerCamelCase = DPTModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Dict ):
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
__lowerCamelCase, __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase = """add"""
with self.assertRaises(UpperCamelCase_ ):
__lowerCamelCase = DPTForDepthEstimation(UpperCamelCase_ )
def lowerCamelCase__ ( ):
'''simple docstring'''
__lowerCamelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
@slow
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = DPTImageProcessor.from_pretrained("""Intel/dpt-hybrid-midas""" )
__lowerCamelCase = DPTForDepthEstimation.from_pretrained("""Intel/dpt-hybrid-midas""" ).to(UpperCamelCase_ )
__lowerCamelCase = prepare_img()
__lowerCamelCase = image_processor(images=UpperCamelCase_ , return_tensors="""pt""" ).to(UpperCamelCase_ )
# forward pass
with torch.no_grad():
__lowerCamelCase = model(**UpperCamelCase_ )
__lowerCamelCase = outputs.predicted_depth
# verify the predicted depth
__lowerCamelCase = torch.Size((1, 3_84, 3_84) )
self.assertEqual(predicted_depth.shape , UpperCamelCase_ )
__lowerCamelCase = torch.tensor(
[[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(UpperCamelCase_ )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 1_00 , UpperCamelCase_ , atol=1E-4 ) )
| 29
|
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
UpperCAmelCase_ = logging.get_logger(__name__)
@add_end_docstrings(__lowerCamelCase)
class lowerCamelCase__( __lowerCamelCase):
def __init__( self: str , **UpperCamelCase_: int ):
super().__init__(**UpperCamelCase_ )
if self.framework == "tf":
raise ValueError(F'The {self.__class__} is only available in PyTorch.' )
requires_backends(self , """vision""" )
self.check_model_type(UpperCamelCase_ )
def __call__( self: Union[str, Any] , UpperCamelCase_: Union[str, "Image.Image", List[Dict[str, Any]]] , UpperCamelCase_: Union[str, List[str]] = None , **UpperCamelCase_: List[str] , ):
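        # "text_queries" is accepted as an alias for the candidate labels argument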
if "text_queries" in kwargs:
__lowerCamelCase = kwargs.pop("""text_queries""" )
if isinstance(UpperCamelCase_ , (str, Image.Image) ):
__lowerCamelCase = {"""image""": image, """candidate_labels""": candidate_labels}
else:
__lowerCamelCase = image
__lowerCamelCase = super().__call__(UpperCamelCase_ , **UpperCamelCase_ )
return results
def lowerCAmelCase__ ( self: List[str] , **UpperCamelCase_: Dict ):
__lowerCamelCase = {}
if "threshold" in kwargs:
__lowerCamelCase = kwargs["""threshold"""]
if "top_k" in kwargs:
__lowerCamelCase = kwargs["""top_k"""]
return {}, {}, postprocess_params
def lowerCAmelCase__ ( self: Any , UpperCamelCase_: Optional[Any] ):
__lowerCamelCase = load_image(inputs["""image"""] )
__lowerCamelCase = inputs["""candidate_labels"""]
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = candidate_labels.split(""",""" )
__lowerCamelCase = torch.tensor([[image.height, image.width]] , dtype=torch.intaa )
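        # keep the original (height, width) so predicted boxes can be rescaled in postprocessing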
for i, candidate_label in enumerate(UpperCamelCase_ ):
__lowerCamelCase = self.tokenizer(UpperCamelCase_ , return_tensors=self.framework )
__lowerCamelCase = self.image_processor(UpperCamelCase_ , return_tensors=self.framework )
yield {
"is_last": i == len(UpperCamelCase_ ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Tuple ):
__lowerCamelCase = model_inputs.pop("""target_size""" )
__lowerCamelCase = model_inputs.pop("""candidate_label""" )
__lowerCamelCase = model_inputs.pop("""is_last""" )
__lowerCamelCase = self.model(**UpperCamelCase_ )
__lowerCamelCase = {"""target_size""": target_size, """candidate_label""": candidate_label, """is_last""": is_last, **outputs}
return model_outputs
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Any , UpperCamelCase_: Dict=0.1 , UpperCamelCase_: Union[str, Any]=None ):
__lowerCamelCase = []
for model_output in model_outputs:
__lowerCamelCase = model_output["""candidate_label"""]
__lowerCamelCase = BaseModelOutput(UpperCamelCase_ )
__lowerCamelCase = self.image_processor.post_process_object_detection(
outputs=UpperCamelCase_ , threshold=UpperCamelCase_ , target_sizes=model_output["""target_size"""] )[0]
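            # iterate over the indices of detections whose score survived the threshold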
for index in outputs["scores"].nonzero():
__lowerCamelCase = outputs["""scores"""][index].item()
__lowerCamelCase = self._get_bounding_box(outputs["""boxes"""][index][0] )
__lowerCamelCase = {"""score""": score, """label""": label, """box""": box}
results.append(UpperCamelCase_ )
        __lowerCamelCase = sorted(UpperCamelCase_ , key=lambda UpperCamelCase_ : UpperCamelCase_["score"] , reverse=UpperCamelCase_ )
if top_k:
__lowerCamelCase = results[:top_k]
return results
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: "torch.Tensor" ):
if self.framework != "pt":
raise ValueError("""The ZeroShotObjectDetectionPipeline is only available in PyTorch.""" )
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = box.int().tolist()
__lowerCamelCase = {
"""xmin""": xmin,
"""ymin""": ymin,
"""xmax""": xmax,
"""ymax""": ymax,
}
return bbox
| 29
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
UpperCAmelCase_ = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ['MLukeTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 29
|
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase_ = get_tests_dir('fixtures/test_sentencepiece.model')
UpperCAmelCase_ = {'target_lang': 'fi', 'source_lang': 'en'}
UpperCAmelCase_ = '>>zh<<'
UpperCAmelCase_ = 'Helsinki-NLP/'
if is_torch_available():
UpperCAmelCase_ = 'pt'
elif is_tf_available():
UpperCAmelCase_ = 'tf'
else:
UpperCAmelCase_ = 'jax'
@require_sentencepiece
class lowerCamelCase__( __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : Union[str, Any] = MarianTokenizer
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : int = True
def lowerCAmelCase__ ( self: Union[str, Any] ):
super().setUp()
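        # assemble a tiny vocabulary and reuse the SentencePiece fixture so the tokenizer can be built and saved locally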
__lowerCamelCase = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
__lowerCamelCase = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
__lowerCamelCase = Path(self.tmpdirname )
save_json(UpperCamelCase_ , save_dir / VOCAB_FILES_NAMES["""vocab"""] )
save_json(UpperCamelCase_ , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(UpperCamelCase_ , save_dir / VOCAB_FILES_NAMES["""source_spm"""] )
copyfile(UpperCamelCase_ , save_dir / VOCAB_FILES_NAMES["""target_spm"""] )
__lowerCamelCase = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase__ ( self: Optional[Any] , **UpperCamelCase_: Any ):
return MarianTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: Optional[int] ):
return (
"This is a test",
"This is a test",
)
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = """</s>"""
__lowerCamelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase_ ) , UpperCamelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase_ ) , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """</s>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """<pad>""" )
self.assertEqual(len(UpperCamelCase_ ) , 9 )
def lowerCAmelCase__ ( self: Tuple ):
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = MarianTokenizer.from_pretrained(F'{ORG_NAME}opus-mt-en-de' )
__lowerCamelCase = en_de_tokenizer(["""I am a small frog"""] , return_tensors=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = [38, 1_21, 14, 6_97, 3_88_48, 0]
self.assertListEqual(UpperCamelCase_ , batch.input_ids[0] )
__lowerCamelCase = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(UpperCamelCase_ )
__lowerCamelCase = [x.name for x in Path(UpperCamelCase_ ).glob("""*""" )]
self.assertIn("""source.spm""" , UpperCamelCase_ )
MarianTokenizer.from_pretrained(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = tok(
["""I am a small frog""" * 10_00, """I am a small frog"""] , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(batch.input_ids.shape , (2, 5_12) )
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = tok(["""I am a tiny frog""", """I am a small frog"""] , padding=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
@slow
def lowerCAmelCase__ ( self: Optional[int] ):
# fmt: off
__lowerCamelCase = {"""input_ids""": [[4_34_95, 4_62, 20, 4_21_64, 13_69, 52, 4_64, 1_32, 17_03, 4_92, 13, 74_91, 3_89_99, 6, 8, 4_64, 1_32, 17_03, 4_92, 13, 46_69, 3_78_67, 13, 75_25, 27, 15_93, 9_88, 13, 3_39_72, 70_29, 6, 20, 82_51, 3_83, 2, 2_70, 58_66, 37_88, 2, 23_53, 82_51, 1_23_38, 2, 1_39_58, 3_87, 2, 36_29, 69_53, 1_88, 29_00, 2, 1_39_58, 80_11, 1_15_01, 23, 84_60, 40_73, 3_40_09, 20, 4_35, 1_14_39, 27, 8, 84_60, 40_73, 60_04, 20, 99_88, 3_75, 27, 33, 2_66, 19_45, 10_76, 13_50, 3_78_67, 32_88, 5, 5_77, 10_76, 43_74, 8, 50_82, 5, 2_64_53, 2_57, 5_56, 4_03, 2, 2_42, 1_32, 3_83, 3_16, 4_92, 8, 1_07_67, 6, 3_16, 3_04, 42_39, 3, 0], [1_48, 1_57_22, 19, 18_39, 12, 13_50, 13, 2_23_27, 50_82, 54_18, 4_75_67, 3_59_38, 59, 3_18, 1_95_52, 1_08, 21_83, 54, 1_49_76, 48_35, 32, 5_47, 11_14, 8, 3_15, 24_17, 5, 92, 1_90_88, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00], [36, 63_95, 1_25_70, 3_91_47, 1_15_97, 6, 2_66, 4, 4_54_05, 72_96, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase_ , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , )
def lowerCAmelCase__ ( self: int ):
__lowerCamelCase = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" )
__lowerCamelCase = """Tämä on testi"""
__lowerCamelCase = """This is a test"""
__lowerCamelCase = [76, 7, 20_47, 2]
__lowerCamelCase = [69, 12, 11, 9_40, 2]
__lowerCamelCase = tokenizer(UpperCamelCase_ ).input_ids
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = tokenizer(text_target=UpperCamelCase_ ).input_ids
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = tokenizer.decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
| 29
| 1
|
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase_ = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class lowerCamelCase__( __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : Any = XLNetTokenizer
UpperCAmelCase__ : Optional[int] = XLNetTokenizerFast
UpperCAmelCase__ : Union[str, Any] = True
UpperCAmelCase__ : str = True
def lowerCAmelCase__ ( self: Dict ):
super().setUp()
# We have a SentencePiece fixture for testing
__lowerCamelCase = XLNetTokenizer(UpperCamelCase_ , keep_accents=UpperCamelCase_ )
tokenizer.sanitize_special_tokens()
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase__ ( self: int ):
__lowerCamelCase = """<s>"""
__lowerCamelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase_ ) , UpperCamelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase_ ) , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<unk>""" )
self.assertEqual(vocab_keys[1] , """<s>""" )
self.assertEqual(vocab_keys[-1] , """<eod>""" )
self.assertEqual(len(UpperCamelCase_ ) , 10_06 )
def lowerCAmelCase__ ( self: Optional[int] ):
self.assertEqual(self.get_tokenizer().vocab_size , 10_00 )
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = XLNetTokenizer(UpperCamelCase_ , keep_accents=UpperCamelCase_ )
__lowerCamelCase = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(UpperCamelCase_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , [2_85, 46, 10, 1_70, 3_82] )
__lowerCamelCase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
UpperCamelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
__lowerCamelCase = tokenizer.convert_tokens_to_ids(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] )
__lowerCamelCase = tokenizer.convert_ids_to_tokens(UpperCamelCase_ )
self.assertListEqual(
UpperCamelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = XLNetTokenizer(UpperCamelCase_ , do_lower_case=UpperCamelCase_ )
__lowerCamelCase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
UpperCamelCase_ , [
SPIECE_UNDERLINE + """""",
"""i""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] , )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""▁he""", """ll""", """o"""] )
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = XLNetTokenizer(UpperCamelCase_ , do_lower_case=UpperCamelCase_ )
__lowerCamelCase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
UpperCamelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] , )
@slow
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = XLNetTokenizer.from_pretrained("""xlnet-base-cased""" )
__lowerCamelCase = tokenizer.encode("""sequence builders""" , add_special_tokens=UpperCamelCase_ )
__lowerCamelCase = tokenizer.encode("""multi-sequence build""" , add_special_tokens=UpperCamelCase_ )
__lowerCamelCase = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ )
__lowerCamelCase = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ , UpperCamelCase_ )
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
def lowerCAmelCase__ ( self: Optional[int] ):
# fmt: off
__lowerCamelCase = {"""input_ids""": [[17, 2_14_42, 2_70, 17, 10, 1_46_45, 3_18, 34, 17, 45_46, 31_45, 7_87, 13, 77_52, 2_20_18, 23, 21, 17, 45_46, 31_45, 7_87, 13, 33_52, 1_44_31, 13, 55_00, 11, 11_76, 5_80, 13, 1_68_19, 47_97, 23, 17, 10, 1_71_35, 6_58, 19, 4_57, 79_32, 13, 1_84, 19, 31_54, 1_71_35, 64_68, 19, 14_04, 1_22_69, 19, 42_29, 53_56, 1_62_64, 46, 19, 17, 2_05_45, 1_03_95, 9, 9, 9, 11, 28, 64_21, 95_31, 2_07_29, 17, 10, 3_53, 1_70_22, 11, 21, 64_21, 95_31, 1_69_49, 17, 10, 1_15_09, 7_53, 11, 33, 95, 24_21, 73_85, 9_56, 1_44_31, 26_26, 25, 8_42, 73_85, 48_36, 21, 14_29, 22_72, 98_55, 31_20, 1_61, 2_47_38, 19, 1_32_03, 6_58, 2_18, 7_87, 21, 4_30, 1_84_82, 8_47, 26_37, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_22, 2_21_78, 27, 10_64, 22, 9_56, 13, 1_11_01, 14_29, 58_54, 2_43_13, 1_89_53, 40, 4_22, 2_43_66, 68, 17_58, 37, 1_04_83, 1_42_57, 31, 2_07, 2_63, 21, 2_03, 37_73, 25, 71, 97_35, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 20_49, 34_42, 17, 1_38_94, 33_80, 23, 95, 18, 1_76_34, 22_88, 9, 4, 3]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # 
        # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase_ , model_name="""xlnet-base-cased""" , revision="""c841166438c31ec7ca9a106dee7bb312b73ae511""" , )
| 29
|
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class lowerCamelCase__( unittest.TestCase):
@parameterized.expand([(None,), ("""foo.json""",)] )
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: List[str] ):
__lowerCamelCase = GenerationConfig(
do_sample=UpperCamelCase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCamelCase_ , config_name=UpperCamelCase_ )
__lowerCamelCase = GenerationConfig.from_pretrained(UpperCamelCase_ , config_name=UpperCamelCase_ )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , UpperCamelCase_ )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = AutoConfig.from_pretrained("""gpt2""" )
__lowerCamelCase = GenerationConfig.from_model_config(UpperCamelCase_ )
__lowerCamelCase = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(UpperCamelCase_ , UpperCamelCase_ )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = GenerationConfig()
__lowerCamelCase = {
"""max_new_tokens""": 10_24,
"""foo""": """bar""",
}
__lowerCamelCase = copy.deepcopy(UpperCamelCase_ )
__lowerCamelCase = generation_config.update(**UpperCamelCase_ )
# update_kwargs was not modified (no side effects)
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 10_24 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(UpperCamelCase_ , {"""foo""": """bar"""} )
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = GenerationConfig()
__lowerCamelCase = """bar"""
with tempfile.TemporaryDirectory("""test-generation-config""" ) as tmp_dir:
generation_config.save_pretrained(UpperCamelCase_ )
__lowerCamelCase = GenerationConfig.from_pretrained(UpperCamelCase_ )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , """bar""" )
__lowerCamelCase = GenerationConfig.from_model_config(UpperCamelCase_ )
assert not hasattr(UpperCamelCase_ , """foo""" ) # no new kwargs should be initialized if from config
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , UpperCamelCase_ )
self.assertEqual(default_config.num_beams , 1 )
__lowerCamelCase = GenerationConfig(
do_sample=UpperCamelCase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , UpperCamelCase_ )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCamelCase_ )
__lowerCamelCase = GenerationConfig.from_pretrained(UpperCamelCase_ , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , UpperCamelCase_ )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class lowerCamelCase__( unittest.TestCase):
@classmethod
def lowerCAmelCase__ ( cls: Optional[Any] ):
__lowerCamelCase = TOKEN
HfFolder.save_token(UpperCamelCase_ )
@classmethod
def lowerCAmelCase__ ( cls: str ):
try:
delete_repo(token=cls._token , repo_id="""test-generation-config""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-generation-config-org""" )
except HTTPError:
pass
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = GenerationConfig(
do_sample=UpperCamelCase_ , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""test-generation-config""" , use_auth_token=self._token )
__lowerCamelCase = GenerationConfig.from_pretrained(F'{USER}/test-generation-config' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-generation-config""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
UpperCamelCase_ , repo_id="""test-generation-config""" , push_to_hub=UpperCamelCase_ , use_auth_token=self._token )
__lowerCamelCase = GenerationConfig.from_pretrained(F'{USER}/test-generation-config' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_ ) )
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = GenerationConfig(
do_sample=UpperCamelCase_ , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""valid_org/test-generation-config-org""" , use_auth_token=self._token )
__lowerCamelCase = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-generation-config-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
UpperCamelCase_ , repo_id="""valid_org/test-generation-config-org""" , push_to_hub=UpperCamelCase_ , use_auth_token=self._token )
__lowerCamelCase = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_ ) )
| 29
| 1
|
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('1.6'):
UpperCAmelCase_ = True
from torch.cuda.amp import autocast
UpperCAmelCase_ = logging.getLogger(__name__)
@dataclass
class lowerCamelCase__:
UpperCAmelCase__ : str = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
UpperCAmelCase__ : Optional[str] = field(
default=__lowerCamelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
UpperCAmelCase__ : Optional[bool] = field(
default=__lowerCamelCase , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'})
UpperCAmelCase__ : Optional[bool] = field(
default=__lowerCamelCase , metadata={'help': 'Whether to log verbose messages or not.'} , )
UpperCAmelCase__ : Optional[float] = field(
default=2.0 , metadata={'help': 'Maximum temperature for gumbel softmax.'})
UpperCAmelCase__ : Optional[float] = field(
default=0.5 , metadata={'help': 'Minimum temperature for gumbel softmax.'})
UpperCAmelCase__ : Optional[float] = field(
default=0.99_99_95 , metadata={'help': 'Decay of gumbel temperature during training.'})
def lowerCamelCase__ ( A__ : ModelArguments , A__ : TrainingArguments ):
'''simple docstring'''
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
__lowerCamelCase = logging.WARNING
if model_args.verbose_logging:
__lowerCamelCase = logging.DEBUG
elif trainer_utils.is_main_process(training_args.local_rank ):
__lowerCamelCase = logging.INFO
logger.setLevel(A__ )
@dataclass
class lowerCamelCase__:
UpperCAmelCase__ : str = field(
default=__lowerCamelCase , metadata={'help': 'The name of the dataset to use (via the datasets library).'})
UpperCAmelCase__ : Optional[str] = field(
default=__lowerCamelCase , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
UpperCAmelCase__ : Optional[str] = field(
default='train' , metadata={
'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
} , )
UpperCAmelCase__ : Optional[str] = field(
default='validation' , metadata={
'help': (
'The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''
)
} , )
UpperCAmelCase__ : Optional[str] = field(
default='file' , metadata={'help': 'Column in the dataset that contains speech file path. Defaults to \'file\''} , )
UpperCAmelCase__ : bool = field(
default=__lowerCamelCase , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'})
UpperCAmelCase__ : Optional[int] = field(
default=1 , metadata={
'help': 'The percentage of the train set used as validation set in case there\'s no validation split'
} , )
UpperCAmelCase__ : Optional[int] = field(
default=__lowerCamelCase , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
UpperCAmelCase__ : Optional[float] = field(
default=20.0 , metadata={'help': 'Filter audio files that are longer than `max_duration_in_seconds` seconds'})
@dataclass
class lowerCamelCase__:
UpperCAmelCase__ : WavaVecaForPreTraining
UpperCAmelCase__ : WavaVecaFeatureExtractor
UpperCAmelCase__ : Union[bool, str] = "longest"
UpperCAmelCase__ : Optional[int] = None
UpperCAmelCase__ : Optional[int] = None
def __call__( self: Optional[Any] , UpperCamelCase_: List[Dict[str, Union[List[int], torch.Tensor]]] ):
# reformat list to dict and set to pytorch format
__lowerCamelCase = self.feature_extractor.pad(
UpperCamelCase_ , max_length=self.max_length , padding=self.padding , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" , )
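        # infer how many frames the feature encoder produces for the padded input length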
__lowerCamelCase = self.model._get_feat_extract_output_lengths(batch["""input_values"""].shape[-1] )
__lowerCamelCase = batch["""input_values"""].shape[0]
# make sure that no loss is computed on padded inputs
if batch["attention_mask"] is not None:
# compute real output lengths according to convolution formula
__lowerCamelCase = self.model._get_feat_extract_output_lengths(batch["""attention_mask"""].sum(-1 ) ).to(
torch.long )
__lowerCamelCase = torch.zeros(
(batch_size, mask_indices_seq_length) , dtype=torch.long , device=batch["""input_values"""].device )
            # these two operations make sure that all positions up to each sequence's
            # true output length are attended to: a 1 is placed at the last valid frame,
            # then flip/cumsum/flip broadcasts it backwards over all earlier positions
__lowerCamelCase = 1
__lowerCamelCase = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool()
# sample randomly masked indices
__lowerCamelCase = _compute_mask_indices(
(batch_size, mask_indices_seq_length) , self.model.config.mask_time_prob , self.model.config.mask_time_length , attention_mask=UpperCamelCase_ , min_masks=2 , )
return batch
class lowerCamelCase__( __lowerCamelCase):
def __init__( self: List[Any] , *UpperCamelCase_: int , UpperCamelCase_: Any=1 , UpperCamelCase_: List[Any]=0 , UpperCamelCase_: List[str]=1.0 , **UpperCamelCase_: Dict ):
super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
__lowerCamelCase = 0
__lowerCamelCase = max_gumbel_temp
__lowerCamelCase = min_gumbel_temp
__lowerCamelCase = gumbel_temp_decay
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: nn.Module , UpperCamelCase_: Dict[str, Union[torch.Tensor, Any]] ):
model.train()
__lowerCamelCase = self._prepare_inputs(UpperCamelCase_ )
if self.use_amp:
with autocast():
__lowerCamelCase = self.compute_loss(UpperCamelCase_ , UpperCamelCase_ )
else:
__lowerCamelCase = self.compute_loss(UpperCamelCase_ , UpperCamelCase_ )
if self.args.n_gpu > 1 or self.deepspeed:
if model.module.config.ctc_loss_reduction == "mean":
__lowerCamelCase = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
__lowerCamelCase = loss.sum() / (inputs["""mask_time_indices"""]).sum()
else:
raise ValueError(F'{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']' )
if self.args.gradient_accumulation_steps > 1:
__lowerCamelCase = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(UpperCamelCase_ ).backward()
elif self.use_apex:
with amp.scale_loss(UpperCamelCase_ , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(UpperCamelCase_ )
else:
loss.backward()
self.num_update_step += 1
# make sure gumbel softmax temperature is decayed
if self.args.n_gpu > 1 or self.deepspeed:
model.module.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
else:
model.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
return loss.detach()
def lowerCamelCase__ ( ):
'''simple docstring'''
__lowerCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase = parser.parse_args_into_dataclasses()
configure_logger(A__ , A__ )
# Downloading and loading a dataset from the hub.
__lowerCamelCase = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
__lowerCamelCase = DatasetDict()
__lowerCamelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=f'{data_args.train_split_name}[:{data_args.validation_split_percentage}%]' , cache_dir=model_args.cache_dir , )
__lowerCamelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=f'{data_args.train_split_name}[{data_args.validation_split_percentage}%:]' , cache_dir=model_args.cache_dir , )
else:
        # make sure only "validation" and "train" keys remain
__lowerCamelCase = DatasetDict()
__lowerCamelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split="""validation""" , cache_dir=model_args.cache_dir , )
__lowerCamelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=f'{data_args.train_split_name}' , cache_dir=model_args.cache_dir , )
# only normalized-inputs-training is supported
__lowerCamelCase = WavaVecaFeatureExtractor.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=A__ )
def prepare_dataset(A__ : str ):
# check that all files have the correct sampling rate
__lowerCamelCase, __lowerCamelCase = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate )
return batch
# load audio files into numpy arrays
__lowerCamelCase = datasets.map(
A__ , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets["""train"""].column_names )
# filter audio files that are too long
__lowerCamelCase = vectorized_datasets.filter(
        lambda A__ : len(A__["""speech"""] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )
def normalize(A__ : Dict ):
return feature_extractor(batch["""speech"""] , sampling_rate=feature_extractor.sampling_rate )
# normalize and transform to `BatchFeatures`
__lowerCamelCase = vectorized_datasets.map(
A__ , batched=A__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets["""train"""].column_names , )
# pretraining is only supported for "newer" stable layer norm architecture
# apply_spec_augment has to be True, mask_feature_prob has to be 0.0
__lowerCamelCase = WavaVecaConfig.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
raise ValueError(
"""PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"""
""" ``config.feat_extract_norm='layer'""" )
__lowerCamelCase = WavaVecaForPreTraining(A__ )
__lowerCamelCase = DataCollatorForWavaVecaPretraining(model=A__ , feature_extractor=A__ )
__lowerCamelCase = WavaVecaPreTrainer(
model=A__ , data_collator=A__ , args=A__ , train_dataset=vectorized_datasets["""train"""] , eval_dataset=vectorized_datasets["""validation"""] , tokenizer=A__ , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
trainer.train()
if __name__ == "__main__":
main()
| 29
|
def lowerCamelCase__ ( A__ : list ):
'''simple docstring'''
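    # cocktail shaker sort: a bidirectional bubble sort that alternates backward and forward passes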
for i in range(len(A__ ) - 1 , 0 , -1 ):
__lowerCamelCase = False
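        # backward pass: drag the smallest remaining element toward the front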
for j in range(A__ , 0 , -1 ):
if unsorted[j] < unsorted[j - 1]:
__lowerCamelCase, __lowerCamelCase = unsorted[j - 1], unsorted[j]
__lowerCamelCase = True
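        # forward pass: push the largest remaining element toward the back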
for j in range(A__ ):
if unsorted[j] > unsorted[j + 1]:
__lowerCamelCase, __lowerCamelCase = unsorted[j + 1], unsorted[j]
__lowerCamelCase = True
if not swapped:
break
return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ = input('Enter numbers separated by a comma:\n').strip()
UpperCAmelCase_ = [int(item) for item in user_input.split(',')]
print(f"""{cocktail_shaker_sort(unsorted) = }""")
| 29
| 1
|
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def lowerCamelCase__ ( A__ : Features ):
'''simple docstring'''
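    # pick a Parquet writer batch size small enough that row groups containing images, audio, or raw binary stay cheap to read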
__lowerCamelCase = np.inf
def set_batch_size(A__ : FeatureType ) -> None:
nonlocal batch_size
if isinstance(A__ , A__ ):
__lowerCamelCase = min(A__ , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
elif isinstance(A__ , A__ ):
__lowerCamelCase = min(A__ , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
elif isinstance(A__ , A__ ) and feature.dtype == "binary":
__lowerCamelCase = min(A__ , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )
_visit(A__ , A__ )
return None if batch_size is np.inf else batch_size
class lowerCamelCase__( __lowerCamelCase):
def __init__( self: int , UpperCamelCase_: NestedDataStructureLike[PathLike] , UpperCamelCase_: Optional[NamedSplit] = None , UpperCamelCase_: Optional[Features] = None , UpperCamelCase_: str = None , UpperCamelCase_: bool = False , UpperCamelCase_: bool = False , UpperCamelCase_: Optional[int] = None , **UpperCamelCase_: Optional[Any] , ):
super().__init__(
UpperCamelCase_ , split=UpperCamelCase_ , features=UpperCamelCase_ , cache_dir=UpperCamelCase_ , keep_in_memory=UpperCamelCase_ , streaming=UpperCamelCase_ , num_proc=UpperCamelCase_ , **UpperCamelCase_ , )
__lowerCamelCase = path_or_paths if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else {self.split: path_or_paths}
__lowerCamelCase = _PACKAGED_DATASETS_MODULES["""parquet"""][1]
__lowerCamelCase = Parquet(
cache_dir=UpperCamelCase_ , data_files=UpperCamelCase_ , features=UpperCamelCase_ , hash=UpperCamelCase_ , **UpperCamelCase_ , )
def lowerCAmelCase__ ( self: Dict ):
# Build iterable dataset
if self.streaming:
__lowerCamelCase = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = None
self.builder.download_and_prepare(
download_config=UpperCamelCase_ , download_mode=UpperCamelCase_ , verification_mode=UpperCamelCase_ , base_path=UpperCamelCase_ , num_proc=self.num_proc , )
__lowerCamelCase = self.builder.as_dataset(
split=self.split , verification_mode=UpperCamelCase_ , in_memory=self.keep_in_memory )
return dataset
class lowerCamelCase__:
def __init__( self: Optional[int] , UpperCamelCase_: Dataset , UpperCamelCase_: Union[PathLike, BinaryIO] , UpperCamelCase_: Optional[int] = None , **UpperCamelCase_: Any , ):
__lowerCamelCase = dataset
__lowerCamelCase = path_or_buf
__lowerCamelCase = batch_size or get_writer_batch_size(dataset.features )
__lowerCamelCase = parquet_writer_kwargs
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with open(self.path_or_buf , """wb+""" ) as buffer:
__lowerCamelCase = self._write(file_obj=UpperCamelCase_ , batch_size=UpperCamelCase_ , **self.parquet_writer_kwargs )
else:
__lowerCamelCase = self._write(file_obj=self.path_or_buf , batch_size=UpperCamelCase_ , **self.parquet_writer_kwargs )
return written
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: BinaryIO , UpperCamelCase_: int , **UpperCamelCase_: List[Any] ):
__lowerCamelCase = 0
__lowerCamelCase = parquet_writer_kwargs.pop("""path_or_buf""" , UpperCamelCase_ )
__lowerCamelCase = self.dataset.features.arrow_schema
__lowerCamelCase = pq.ParquetWriter(UpperCamelCase_ , schema=UpperCamelCase_ , **UpperCamelCase_ )
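        # write the dataset in batch-sized slices so it is never fully materialized in memory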
for offset in logging.tqdm(
range(0 , len(self.dataset ) , UpperCamelCase_ ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating parquet from Arrow format""" , ):
__lowerCamelCase = query_table(
table=self.dataset._data , key=slice(UpperCamelCase_ , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
writer.write_table(UpperCamelCase_ )
written += batch.nbytes
writer.close()
return written
| 29
|
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def lowerCamelCase__ ( A__ : Dict , A__ : Optional[int]=False ):
'''simple docstring'''
try:
__lowerCamelCase = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
__lowerCamelCase = default
else:
# KEY is set, convert it to True or False.
try:
__lowerCamelCase = strtobool(A__ )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f'If set, {key} must be yes or no.' )
return _value
UpperCAmelCase_ = parse_flag_from_env('RUN_SLOW', default=False)
def lowerCamelCase__ ( A__ : Any ):
'''simple docstring'''
return unittest.skip("""Test was skipped""" )(A__ )
def lowerCamelCase__ ( A__ : List[Any] ):
'''simple docstring'''
return unittest.skipUnless(_run_slow_tests , """test is slow""" )(A__ )
def lowerCamelCase__ ( A__ : Union[str, Any] ):
'''simple docstring'''
return unittest.skipUnless(not torch.cuda.is_available() , """test requires only a CPU""" )(A__ )
def lowerCamelCase__ ( A__ : List[str] ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.is_available() , """test requires a GPU""" )(A__ )
def lowerCamelCase__ ( A__ : Union[str, Any] ):
'''simple docstring'''
return unittest.skipUnless(is_xpu_available() , """test requires a XPU""" )(A__ )
def lowerCamelCase__ ( A__ : Optional[int] ):
'''simple docstring'''
return unittest.skipUnless(is_mps_available() , """test requires a `mps` backend support in `torch`""" )(A__ )
def lowerCamelCase__ ( A__ : List[Any] ):
'''simple docstring'''
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , """test requires the Hugging Face suite""" )(A__ )
def lowerCamelCase__ ( A__ : Any ):
'''simple docstring'''
return unittest.skipUnless(is_bnb_available() , """test requires the bitsandbytes library""" )(A__ )
def lowerCamelCase__ ( A__ : Optional[int] ):
'''simple docstring'''
return unittest.skipUnless(is_tpu_available() , """test requires TPU""" )(A__ )
def lowerCamelCase__ ( A__ : List[Any] ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() == 1 , """test requires a GPU""" )(A__ )
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() == 1 , """test requires a XPU""" )(A__ )
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() > 1 , """test requires multiple GPUs""" )(A__ )
def lowerCamelCase__ ( A__ : Tuple ):
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() > 1 , """test requires multiple XPUs""" )(A__ )
def lowerCamelCase__ ( A__ : Optional[int] ):
'''simple docstring'''
return unittest.skipUnless(is_safetensors_available() , """test requires safetensors""" )(A__ )
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
return unittest.skipUnless(is_deepspeed_available() , """test requires DeepSpeed""" )(A__ )
def lowerCamelCase__ ( A__ : List[str] ):
'''simple docstring'''
return unittest.skipUnless(is_torch_version(""">=""" , """1.12.0""" ) , """test requires torch version >= 1.12.0""" )(A__ )
def lowerCamelCase__ ( A__ : Tuple=None , A__ : Optional[Any]=None ):
'''simple docstring'''
if test_case is None:
return partial(A__ , version=A__ )
return unittest.skipUnless(is_torch_version(""">=""" , A__ ) , f'test requires torch version >= {version}' )(A__ )
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
return unittest.skipUnless(is_tensorboard_available() , """test requires Tensorboard""" )(A__ )
def lowerCamelCase__ ( A__ : Optional[Any] ):
'''simple docstring'''
return unittest.skipUnless(is_wandb_available() , """test requires wandb""" )(A__ )
def lowerCamelCase__ ( A__ : str ):
'''simple docstring'''
return unittest.skipUnless(is_comet_ml_available() , """test requires comet_ml""" )(A__ )
UpperCAmelCase_ = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def lowerCamelCase__ ( A__ : Any ):
'''simple docstring'''
return unittest.skipUnless(
_atleast_one_tracker_available , """test requires at least one tracker to be available and for `comet_ml` to not be installed""" , )(A__ )
class lowerCamelCase__( unittest.TestCase):
UpperCAmelCase__ : List[Any] = True
@classmethod
def lowerCAmelCase__ ( cls: int ):
__lowerCamelCase = tempfile.mkdtemp()
@classmethod
def lowerCAmelCase__ ( cls: Any ):
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def lowerCAmelCase__ ( self: Any ):
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob("""**/*""" ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(UpperCamelCase_ )
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: int ):
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: Union[mock.Mock, List[mock.Mock]] ):
__lowerCamelCase = mocks if isinstance(UpperCamelCase_ , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def lowerCamelCase__ ( A__ : Optional[Any] ):
'''simple docstring'''
__lowerCamelCase = AcceleratorState()
__lowerCamelCase = tensor[None].clone().to(state.device )
__lowerCamelCase = gather(A__ ).cpu()
__lowerCamelCase = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , A__ ):
return False
return True
class lowerCamelCase__:
def __init__( self: Union[str, Any] , UpperCamelCase_: Dict , UpperCamelCase_: Any , UpperCamelCase_: Any ):
__lowerCamelCase = returncode
__lowerCamelCase = stdout
__lowerCamelCase = stderr
async def lowerCamelCase__ ( A__ : int , A__ : Any ):
'''simple docstring'''
while True:
__lowerCamelCase = await stream.readline()
if line:
callback(A__ )
else:
break
async def lowerCamelCase__ ( A__ : Dict , A__ : List[str]=None , A__ : Any=None , A__ : Optional[Any]=None , A__ : Tuple=False , A__ : List[Any]=False ):
'''simple docstring'''
if echo:
print("""\nRunning: """ , """ """.join(A__ ) )
__lowerCamelCase = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=A__ , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=A__ , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
    # If it starts hanging, switch to the commented `communicate` code below. The
    # trade-off is that no output is seen until the process finishes, so if it
    # hangs there is no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
__lowerCamelCase = []
__lowerCamelCase = []
def tee(A__ : int , A__ : Any , A__ : Optional[Any] , A__ : int="" ):
__lowerCamelCase = line.decode("""utf-8""" ).rstrip()
sink.append(A__ )
if not quiet:
print(A__ , A__ , file=A__ )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda A__ : tee(A__ , A__ , sys.stdout , label="""stdout:""" ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda A__ : tee(A__ , A__ , sys.stderr , label="""stderr:""" ) ) ),
] , timeout=A__ , )
return _RunOutput(await p.wait() , A__ , A__ )
def lowerCamelCase__ ( A__ : Optional[Any] , A__ : Any=None , A__ : Union[str, Any]=None , A__ : Dict=180 , A__ : str=False , A__ : List[Any]=True ):
'''simple docstring'''
__lowerCamelCase = asyncio.get_event_loop()
__lowerCamelCase = loop.run_until_complete(
_stream_subprocess(A__ , env=A__ , stdin=A__ , timeout=A__ , quiet=A__ , echo=A__ ) )
__lowerCamelCase = """ """.join(A__ )
if result.returncode > 0:
__lowerCamelCase = """\n""".join(result.stderr )
raise RuntimeError(
f'\'{cmd_str}\' failed with returncode {result.returncode}\n\n'
f'The combined stderr from workers follows:\n{stderr}' )
return result
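# Usage sketch (hypothetical command; assuming this wrapper keeps its upstream
# name `execute_subprocess_async`): stdout/stderr are captured line by line,
# and a nonzero return code raises RuntimeError with the combined stderr.
# result = execute_subprocess_async(["python", "-c", "print('ok')"])
# assert result.returncode == 0 and result.stdout[0] == "ok"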
class lowerCamelCase__( __lowerCamelCase):
pass
def lowerCamelCase__ ( A__ : List[str] , A__ : Union[str, Any]=False ):
'''simple docstring'''
try:
__lowerCamelCase = subprocess.check_output(A__ , stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(A__ , """decode""" ):
__lowerCamelCase = output.decode("""utf-8""" )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f'Command `{" ".join(A__ )}` failed with the following error:\n\n{e.output.decode()}' ) from e
| 29
| 1
|
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
UpperCAmelCase_ = {
'sample_size': 32,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 2,
'num_class_embeds': 1_000,
'block_out_channels': [32, 64],
'attention_head_dim': 8,
'down_block_types': [
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'scale_shift',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
UpperCAmelCase_ = {
'sample_size': 64,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 3,
'num_class_embeds': 1_000,
'block_out_channels': [192, 192 * 2, 192 * 3, 192 * 4],
'attention_head_dim': 64,
'down_block_types': [
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'AttnUpBlock2D',
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'scale_shift',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
UpperCAmelCase_ = {
'sample_size': 256,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 2,
'num_class_embeds': None,
'block_out_channels': [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
'attention_head_dim': 64,
'down_block_types': [
'ResnetDownsampleBlock2D',
'ResnetDownsampleBlock2D',
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'AttnUpBlock2D',
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
'ResnetUpsampleBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'default',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
UpperCAmelCase_ = {
'num_train_timesteps': 40,
'sigma_min': 0.002,
'sigma_max': 80.0,
}
UpperCAmelCase_ = {
'num_train_timesteps': 201,
'sigma_min': 0.002,
'sigma_max': 80.0,
}
UpperCAmelCase_ = {
'num_train_timesteps': 151,
'sigma_min': 0.002,
'sigma_max': 80.0,
}
def lowerCamelCase__ ( A__ : List[Any] ):
'''simple docstring'''
if isinstance(A__ , A__ ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("""boolean value expected""" )
def lowerCamelCase__ ( A__ : List[str] , A__ : Optional[int] , A__ : Optional[Any] , A__ : int , A__ : Optional[Any]=False ):
'''simple docstring'''
__lowerCamelCase = checkpoint[f'{old_prefix}.in_layers.0.weight']
__lowerCamelCase = checkpoint[f'{old_prefix}.in_layers.0.bias']
__lowerCamelCase = checkpoint[f'{old_prefix}.in_layers.2.weight']
__lowerCamelCase = checkpoint[f'{old_prefix}.in_layers.2.bias']
__lowerCamelCase = checkpoint[f'{old_prefix}.emb_layers.1.weight']
__lowerCamelCase = checkpoint[f'{old_prefix}.emb_layers.1.bias']
__lowerCamelCase = checkpoint[f'{old_prefix}.out_layers.0.weight']
__lowerCamelCase = checkpoint[f'{old_prefix}.out_layers.0.bias']
__lowerCamelCase = checkpoint[f'{old_prefix}.out_layers.3.weight']
__lowerCamelCase = checkpoint[f'{old_prefix}.out_layers.3.bias']
if has_skip:
__lowerCamelCase = checkpoint[f'{old_prefix}.skip_connection.weight']
__lowerCamelCase = checkpoint[f'{old_prefix}.skip_connection.bias']
return new_checkpoint
def lowerCamelCase__ ( A__ : Tuple , A__ : Dict , A__ : int , A__ : List[Any] , A__ : Optional[int]=None ):
'''simple docstring'''
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase = checkpoint[f'{old_prefix}.qkv.weight'].chunk(3 , dim=0 )
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase = checkpoint[f'{old_prefix}.qkv.bias'].chunk(3 , dim=0 )
__lowerCamelCase = checkpoint[f'{old_prefix}.norm.weight']
__lowerCamelCase = checkpoint[f'{old_prefix}.norm.bias']
__lowerCamelCase = weight_q.squeeze(-1 ).squeeze(-1 )
__lowerCamelCase = bias_q.squeeze(-1 ).squeeze(-1 )
__lowerCamelCase = weight_k.squeeze(-1 ).squeeze(-1 )
__lowerCamelCase = bias_k.squeeze(-1 ).squeeze(-1 )
__lowerCamelCase = weight_v.squeeze(-1 ).squeeze(-1 )
__lowerCamelCase = bias_v.squeeze(-1 ).squeeze(-1 )
__lowerCamelCase = (
checkpoint[f'{old_prefix}.proj_out.weight'].squeeze(-1 ).squeeze(-1 )
)
__lowerCamelCase = checkpoint[f'{old_prefix}.proj_out.bias'].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
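# Minimal sketch of the fused-qkv split used above (illustration only; the
# helper name is made up): a conv-style projection of shape (3*C, C, 1, 1) is
# chunked along dim 0 into q, k and v, and the trailing 1x1 spatial dims are
# squeezed away to leave plain (C, C) linear weights.
def _split_qkv(qkv_weight):
    # works on any torch.Tensor of shape (3*C, C, 1, 1); no extra imports needed
    q, k, v = qkv_weight.chunk(3, dim=0)
    return tuple(w.squeeze(-1).squeeze(-1) for w in (q, k, v))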
def lowerCamelCase__ ( A__ : str , A__ : Union[str, Any] ):
'''simple docstring'''
__lowerCamelCase = torch.load(A__ , map_location="""cpu""" )
__lowerCamelCase = {}
__lowerCamelCase = checkpoint["""time_embed.0.weight"""]
__lowerCamelCase = checkpoint["""time_embed.0.bias"""]
__lowerCamelCase = checkpoint["""time_embed.2.weight"""]
__lowerCamelCase = checkpoint["""time_embed.2.bias"""]
if unet_config["num_class_embeds"] is not None:
__lowerCamelCase = checkpoint["""label_emb.weight"""]
__lowerCamelCase = checkpoint["""input_blocks.0.0.weight"""]
__lowerCamelCase = checkpoint["""input_blocks.0.0.bias"""]
__lowerCamelCase = unet_config["""down_block_types"""]
__lowerCamelCase = unet_config["""layers_per_block"""]
__lowerCamelCase = unet_config["""attention_head_dim"""]
__lowerCamelCase = unet_config["""block_out_channels"""]
__lowerCamelCase = 1
__lowerCamelCase = channels_list[0]
for i, layer_type in enumerate(A__ ):
__lowerCamelCase = channels_list[i]
__lowerCamelCase = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(A__ ):
__lowerCamelCase = f'down_blocks.{i}.resnets.{j}'
__lowerCamelCase = f'input_blocks.{current_layer}.0'
__lowerCamelCase = True if j == 0 and downsample_block_has_skip else False
__lowerCamelCase = convert_resnet(A__ , A__ , A__ , A__ , has_skip=A__ )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(A__ ):
__lowerCamelCase = f'down_blocks.{i}.resnets.{j}'
__lowerCamelCase = f'input_blocks.{current_layer}.0'
__lowerCamelCase = True if j == 0 and downsample_block_has_skip else False
__lowerCamelCase = convert_resnet(A__ , A__ , A__ , A__ , has_skip=A__ )
__lowerCamelCase = f'down_blocks.{i}.attentions.{j}'
__lowerCamelCase = f'input_blocks.{current_layer}.1'
__lowerCamelCase = convert_attention(
A__ , A__ , A__ , A__ , A__ )
current_layer += 1
if i != len(A__ ) - 1:
__lowerCamelCase = f'down_blocks.{i}.downsamplers.0'
__lowerCamelCase = f'input_blocks.{current_layer}.0'
__lowerCamelCase = convert_resnet(A__ , A__ , A__ , A__ )
current_layer += 1
__lowerCamelCase = current_channels
# hardcoded the mid-block for now
__lowerCamelCase = """mid_block.resnets.0"""
__lowerCamelCase = """middle_block.0"""
__lowerCamelCase = convert_resnet(A__ , A__ , A__ , A__ )
__lowerCamelCase = """mid_block.attentions.0"""
__lowerCamelCase = """middle_block.1"""
__lowerCamelCase = convert_attention(A__ , A__ , A__ , A__ , A__ )
__lowerCamelCase = """mid_block.resnets.1"""
__lowerCamelCase = """middle_block.2"""
__lowerCamelCase = convert_resnet(A__ , A__ , A__ , A__ )
__lowerCamelCase = 0
__lowerCamelCase = unet_config["""up_block_types"""]
for i, layer_type in enumerate(A__ ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
__lowerCamelCase = f'up_blocks.{i}.resnets.{j}'
__lowerCamelCase = f'output_blocks.{current_layer}.0'
__lowerCamelCase = convert_resnet(A__ , A__ , A__ , A__ , has_skip=A__ )
current_layer += 1
if i != len(A__ ) - 1:
__lowerCamelCase = f'up_blocks.{i}.upsamplers.0'
__lowerCamelCase = f'output_blocks.{current_layer-1}.1'
__lowerCamelCase = convert_resnet(A__ , A__ , A__ , A__ )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
__lowerCamelCase = f'up_blocks.{i}.resnets.{j}'
__lowerCamelCase = f'output_blocks.{current_layer}.0'
__lowerCamelCase = convert_resnet(A__ , A__ , A__ , A__ , has_skip=A__ )
__lowerCamelCase = f'up_blocks.{i}.attentions.{j}'
__lowerCamelCase = f'output_blocks.{current_layer}.1'
__lowerCamelCase = convert_attention(
A__ , A__ , A__ , A__ , A__ )
current_layer += 1
if i != len(A__ ) - 1:
__lowerCamelCase = f'up_blocks.{i}.upsamplers.0'
__lowerCamelCase = f'output_blocks.{current_layer-1}.2'
__lowerCamelCase = convert_resnet(A__ , A__ , A__ , A__ )
__lowerCamelCase = checkpoint["""out.0.weight"""]
__lowerCamelCase = checkpoint["""out.0.bias"""]
__lowerCamelCase = checkpoint["""out.2.weight"""]
__lowerCamelCase = checkpoint["""out.2.bias"""]
return new_checkpoint
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument('--unet_path', default=None, type=str, required=True, help='Path to the unet.pt to convert.')
parser.add_argument(
'--dump_path', default=None, type=str, required=True, help='Path to output the converted UNet model.'
)
parser.add_argument('--class_cond', default=True, type=str, help='Whether the model is class-conditional.')
UpperCAmelCase_ = parser.parse_args()
UpperCAmelCase_ = strabool(args.class_cond)
UpperCAmelCase_ = os.path.basename(args.unet_path)
print(f"""Checkpoint: {ckpt_name}""")
# Get U-Net config
if "imagenet64" in ckpt_name:
UpperCAmelCase_ = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
UpperCAmelCase_ = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
UpperCAmelCase_ = TEST_UNET_CONFIG
else:
raise ValueError(f"""Checkpoint type {ckpt_name} is not currently supported.""")
if not args.class_cond:
UpperCAmelCase_ = None
UpperCAmelCase_ = con_pt_to_diffuser(args.unet_path, unet_config)
UpperCAmelCase_ = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
UpperCAmelCase_ = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
UpperCAmelCase_ = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
UpperCAmelCase_ = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(f"""Checkpoint type {ckpt_name} is not currently supported.""")
UpperCAmelCase_ = CMStochasticIterativeScheduler(**scheduler_config)
UpperCAmelCase_ = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
| 29
|
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
UpperCAmelCase_ = datasets.utils.logging.get_logger(__name__)
class lowerCamelCase__( folder_based_builder.FolderBasedBuilderConfig):
UpperCAmelCase__ : bool = None
UpperCAmelCase__ : bool = None
class lowerCamelCase__( folder_based_builder.FolderBasedBuilder):
UpperCAmelCase__ : List[Any] = datasets.Audio()
UpperCAmelCase__ : str = 'audio'
UpperCAmelCase__ : Union[str, Any] = AudioFolderConfig
UpperCAmelCase__ : List[str] # definition at the bottom of the script
UpperCAmelCase__ : Optional[int] = AudioClassification(audio_column='audio' , label_column='label')
UpperCAmelCase_ = [
'.aiff',
'.au',
'.avr',
'.caf',
'.flac',
'.htk',
'.svx',
'.mat4',
'.mat5',
'.mpc2k',
'.ogg',
'.paf',
'.pvf',
'.raw',
'.rf64',
'.sd2',
'.sds',
'.ircam',
'.voc',
'.w64',
'.wav',
'.nist',
'.wavex',
'.wve',
'.xi',
'.mp3',
'.opus',
]
UpperCAmelCase_ = AUDIO_EXTENSIONS
| 29
| 1
|
from __future__ import annotations
UpperCAmelCase_ = list[list[int]]
# assigning initial values to the grid
UpperCAmelCase_ = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
UpperCAmelCase_ = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def lowerCamelCase__ ( A__ : Matrix , A__ : int , A__ : int , A__ : int ):
'''simple docstring'''
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
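# Worked example against the initial grid above: is_safe(grid, 0, 1, 5) is
# False (a 5 already sits in row 0 at column 3, and in column 1 at row 5),
# while is_safe(grid, 0, 1, 1) is True since no 1 appears in row 0, in
# column 1, or in the top-left 3x3 box.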
def lowerCamelCase__ ( A__ : Matrix ):
'''simple docstring'''
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def lowerCamelCase__ ( A__ : Matrix ):
'''simple docstring'''
if location := find_empty_location(A__ ):
__lowerCamelCase, __lowerCamelCase = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 , 10 ):
if is_safe(A__ , A__ , A__ , A__ ):
__lowerCamelCase = digit
if sudoku(A__ ) is not None:
return grid
__lowerCamelCase = 0
return None
def lowerCamelCase__ ( A__ : Matrix ):
'''simple docstring'''
for row in grid:
for cell in row:
print(A__ , end=""" """ )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('\nExample grid:\n' + '=' * 20)
print_solution(example_grid)
print('\nExample grid solution:')
UpperCAmelCase_ = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('Cannot find a solution.')
| 29
|
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'nvidia/segformer-b0-finetuned-ade-512-512': (
'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : str = 'segformer'
def __init__( self: Union[str, Any] , UpperCamelCase_: Optional[int]=3 , UpperCamelCase_: Any=4 , UpperCamelCase_: int=[2, 2, 2, 2] , UpperCamelCase_: Optional[Any]=[8, 4, 2, 1] , UpperCamelCase_: Union[str, Any]=[32, 64, 1_60, 2_56] , UpperCamelCase_: int=[7, 3, 3, 3] , UpperCamelCase_: Dict=[4, 2, 2, 2] , UpperCamelCase_: str=[1, 2, 5, 8] , UpperCamelCase_: List[str]=[4, 4, 4, 4] , UpperCamelCase_: Optional[int]="gelu" , UpperCamelCase_: List[Any]=0.0 , UpperCamelCase_: List[Any]=0.0 , UpperCamelCase_: Tuple=0.1 , UpperCamelCase_: Optional[int]=0.02 , UpperCamelCase_: List[Any]=0.1 , UpperCamelCase_: Optional[int]=1E-6 , UpperCamelCase_: Optional[int]=2_56 , UpperCamelCase_: Optional[Any]=2_55 , **UpperCamelCase_: List[Any] , ):
super().__init__(**UpperCamelCase_ )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
"""Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"""
""" removed, as the behaviour will default to that of reshape_last_stage = True.""" , UpperCamelCase_ , )
__lowerCamelCase = num_channels
__lowerCamelCase = num_encoder_blocks
__lowerCamelCase = depths
__lowerCamelCase = sr_ratios
__lowerCamelCase = hidden_sizes
__lowerCamelCase = patch_sizes
__lowerCamelCase = strides
__lowerCamelCase = mlp_ratios
__lowerCamelCase = num_attention_heads
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = classifier_dropout_prob
__lowerCamelCase = initializer_range
__lowerCamelCase = drop_path_rate
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = decoder_hidden_size
__lowerCamelCase = kwargs.get("""reshape_last_stage""" , UpperCamelCase_ )
__lowerCamelCase = semantic_loss_ignore_index
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Any = version.parse('1.11')
@property
def lowerCAmelCase__ ( self: Any ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCAmelCase__ ( self: Union[str, Any] ):
return 1E-4
@property
def lowerCAmelCase__ ( self: Dict ):
return 12
| 29
| 1
|
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/config.json',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/config.json',
}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Any = 'xlnet'
UpperCAmelCase__ : Union[str, Any] = ['mems']
UpperCAmelCase__ : Union[str, Any] = {
'n_token': 'vocab_size', # Backward compatibility
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self: Optional[Any] , UpperCamelCase_: List[str]=3_20_00 , UpperCamelCase_: Tuple=10_24 , UpperCamelCase_: int=24 , UpperCamelCase_: Union[str, Any]=16 , UpperCamelCase_: str=40_96 , UpperCamelCase_: Optional[Any]="gelu" , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: List[Any]="bi" , UpperCamelCase_: Dict=0.02 , UpperCamelCase_: List[str]=1E-12 , UpperCamelCase_: List[Any]=0.1 , UpperCamelCase_: Optional[Any]=5_12 , UpperCamelCase_: Any=None , UpperCamelCase_: str=True , UpperCamelCase_: List[Any]=False , UpperCamelCase_: int=False , UpperCamelCase_: Tuple=-1 , UpperCamelCase_: Optional[int]=False , UpperCamelCase_: str="last" , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: int="tanh" , UpperCamelCase_: Optional[Any]=0.1 , UpperCamelCase_: str=5 , UpperCamelCase_: Dict=5 , UpperCamelCase_: int=5 , UpperCamelCase_: List[str]=1 , UpperCamelCase_: Any=2 , **UpperCamelCase_: Optional[int] , ):
__lowerCamelCase = vocab_size
__lowerCamelCase = d_model
__lowerCamelCase = n_layer
__lowerCamelCase = n_head
if d_model % n_head != 0:
raise ValueError(F'\'d_model % n_head\' ({d_model % n_head}) should be equal to 0' )
if "d_head" in kwargs:
if kwargs["d_head"] != d_model // n_head:
raise ValueError(
F'`d_head` ({kwargs["d_head"]}) should be equal to `d_model // n_head` ({d_model // n_head})' )
__lowerCamelCase = d_model // n_head
__lowerCamelCase = ff_activation
__lowerCamelCase = d_inner
__lowerCamelCase = untie_r
__lowerCamelCase = attn_type
__lowerCamelCase = initializer_range
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = dropout
__lowerCamelCase = mem_len
__lowerCamelCase = reuse_len
__lowerCamelCase = bi_data
__lowerCamelCase = clamp_len
__lowerCamelCase = same_length
__lowerCamelCase = summary_type
__lowerCamelCase = summary_use_proj
__lowerCamelCase = summary_activation
__lowerCamelCase = summary_last_dropout
__lowerCamelCase = start_n_top
__lowerCamelCase = end_n_top
__lowerCamelCase = bos_token_id
__lowerCamelCase = pad_token_id
__lowerCamelCase = eos_token_id
if "use_cache" in kwargs:
warnings.warn(
"""The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"""
""" instead.""" , UpperCamelCase_ , )
__lowerCamelCase = kwargs["""use_cache"""]
__lowerCamelCase = use_mems_eval
__lowerCamelCase = use_mems_train
super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
@property
def lowerCAmelCase__ ( self: str ):
logger.info(F'The model {self.model_type} is one of the few models that has no sequence length limit.' )
return -1
@max_position_embeddings.setter
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: Union[str, Any] ):
# Message copied from Transformer-XL documentation
raise NotImplementedError(
F'The model {self.model_type} is one of the few models that has no sequence length limit.' )
| 29
|
import string
import numpy
def lowerCamelCase__ ( A__ : int , A__ : int ):
'''simple docstring'''
return b if a == 0 else greatest_common_divisor(b % a , A__ )
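# e.g. greatest_common_divisor(4, 36) == 4, so a key whose determinant is 4
# would not be coprime with the 36-symbol alphabet and is rejected below.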
class lowerCamelCase__:
UpperCAmelCase__ : Optional[int] = string.ascii_uppercase + string.digits
    # This cipher works over the 36-character alphanumeric alphabet
    # (A-Z plus the digits 0-9)
# take x and return x % len(key_string)
    UpperCAmelCase__ : Optional[int] = numpy.vectorize(lambda x: x % 36)
    UpperCAmelCase__ : List[Any] = numpy.vectorize(round)
def __init__( self: List[Any] , UpperCamelCase_: numpy.ndarray ):
__lowerCamelCase = self.modulus(UpperCamelCase_ ) # mod36 calc's on the encrypt key
self.check_determinant() # validate the determinant of the encryption key
__lowerCamelCase = encrypt_key.shape[0]
def lowerCAmelCase__ ( self: str , UpperCamelCase_: str ):
return self.key_string.index(UpperCamelCase_ )
def lowerCAmelCase__ ( self: str , UpperCamelCase_: int ):
return self.key_string[round(UpperCamelCase_ )]
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
__lowerCamelCase = det % len(self.key_string )
__lowerCamelCase = len(self.key_string )
if greatest_common_divisor(UpperCamelCase_ , len(self.key_string ) ) != 1:
__lowerCamelCase = (
    F'determinant modulo {req_l} of encryption key ({det}) '
    F'is not coprime with {req_l}.\nTry another key.'
)
raise ValueError(UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: str ):
__lowerCamelCase = [char for char in text.upper() if char in self.key_string]
__lowerCamelCase = chars[-1]
while len(UpperCamelCase_ ) % self.break_key != 0:
chars.append(UpperCamelCase_ )
return "".join(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: str ):
__lowerCamelCase = self.process_text(text.upper() )
__lowerCamelCase = """"""
for i in range(0 , len(UpperCamelCase_ ) - self.break_key + 1 , self.break_key ):
__lowerCamelCase = text[i : i + self.break_key]
__lowerCamelCase = [self.replace_letters(UpperCamelCase_ ) for char in batch]
__lowerCamelCase = numpy.array([vec] ).T
__lowerCamelCase = self.modulus(self.encrypt_key.dot(UpperCamelCase_ ) ).T.tolist()[
0
]
__lowerCamelCase = """""".join(
self.replace_digits(UpperCamelCase_ ) for num in batch_encrypted )
encrypted += encrypted_batch
return encrypted
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
__lowerCamelCase = det % len(self.key_string )
__lowerCamelCase = None
for i in range(len(self.key_string ) ):
if (det * i) % len(self.key_string ) == 1:
__lowerCamelCase = i
break
__lowerCamelCase = (
det_inv
* numpy.linalg.det(self.encrypt_key )
* numpy.linalg.inv(self.encrypt_key )
)
return self.to_int(self.modulus(UpperCamelCase_ ) )
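    # Worked sketch of the construction above with a 2x2 key over the 36-symbol
    # alphabet (illustration only): det_inv * det(K) * inv(K) equals det_inv
    # times the integer adjugate of K, which is K's inverse modulo 36.
    # K = numpy.array([[2, 5], [1, 6]])                            # det(K) = 7, gcd(7, 36) == 1
    # det_inv = next(i for i in range(36) if (7 * i) % 36 == 1)    # 31
    # K_inv = numpy.round(31 * 7 * numpy.linalg.inv(K)) % 36       # [[6, 25], [5, 26]]
    # (K @ K_inv) % 36 is the identity matrix.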
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: str ):
__lowerCamelCase = self.make_decrypt_key()
__lowerCamelCase = self.process_text(text.upper() )
__lowerCamelCase = """"""
for i in range(0 , len(UpperCamelCase_ ) - self.break_key + 1 , self.break_key ):
__lowerCamelCase = text[i : i + self.break_key]
__lowerCamelCase = [self.replace_letters(UpperCamelCase_ ) for char in batch]
__lowerCamelCase = numpy.array([vec] ).T
__lowerCamelCase = self.modulus(decrypt_key.dot(UpperCamelCase_ ) ).T.tolist()[0]
__lowerCamelCase = """""".join(
self.replace_digits(UpperCamelCase_ ) for num in batch_decrypted )
decrypted += decrypted_batch
return decrypted
def lowerCamelCase__ ( ):
'''simple docstring'''
__lowerCamelCase = int(input("""Enter the order of the encryption key: """ ) )
__lowerCamelCase = []
print("""Enter each row of the encryption key with space separated integers""" )
for _ in range(A__ ):
__lowerCamelCase = [int(A__ ) for x in input().split()]
hill_matrix.append(A__ )
__lowerCamelCase = HillCipher(numpy.array(A__ ) )
print("""Would you like to encrypt or decrypt some text? (1 or 2)""" )
__lowerCamelCase = input("""\n1. Encrypt\n2. Decrypt\n""" )
if option == "1":
__lowerCamelCase = input("""What text would you like to encrypt?: """ )
print("""Your encrypted text is:""" )
print(hc.encrypt(A__ ) )
elif option == "2":
__lowerCamelCase = input("""What text would you like to decrypt?: """ )
print("""Your decrypted text is:""" )
print(hc.decrypt(A__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 29
| 1
|
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def lowerCamelCase__ ( A__ : Optional[Any] ): # picklable for multiprocessing
'''simple docstring'''
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def lowerCamelCase__ ( ):
'''simple docstring'''
with parallel_backend("""spark""" ):
assert ParallelBackendConfig.backend_name == "spark"
__lowerCamelCase = [1, 2, 3]
with pytest.raises(A__ ):
with parallel_backend("""unsupported backend""" ):
map_nested(A__ , A__ , num_proc=2 )
with pytest.raises(A__ ):
with parallel_backend("""unsupported backend""" ):
map_nested(A__ , A__ , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("""num_proc""" , [2, -1] )
def lowerCamelCase__ ( A__ : Optional[int] ):
'''simple docstring'''
__lowerCamelCase = [1, 2]
__lowerCamelCase = {"""a""": 1, """b""": 2}
__lowerCamelCase = {"""a""": [1, 2], """b""": [3, 4]}
__lowerCamelCase = {"""a""": {"""1""": 1}, """b""": 2}
__lowerCamelCase = {"""a""": 1, """b""": 2, """c""": 3, """d""": 4}
__lowerCamelCase = [2, 3]
__lowerCamelCase = {"""a""": 2, """b""": 3}
__lowerCamelCase = {"""a""": [2, 3], """b""": [4, 5]}
__lowerCamelCase = {"""a""": {"""1""": 2}, """b""": 3}
__lowerCamelCase = {"""a""": 2, """b""": 3, """c""": 4, """d""": 5}
with parallel_backend("""spark""" ):
assert map_nested(A__ , A__ , num_proc=A__ ) == expected_map_nested_sa
assert map_nested(A__ , A__ , num_proc=A__ ) == expected_map_nested_sa
assert map_nested(A__ , A__ , num_proc=A__ ) == expected_map_nested_sa
assert map_nested(A__ , A__ , num_proc=A__ ) == expected_map_nested_sa
assert map_nested(A__ , A__ , num_proc=A__ ) == expected_map_nested_sa
| 29
|
import qiskit
def lowerCamelCase__ ( A__ : int , A__ : int ):
'''simple docstring'''
__lowerCamelCase = qiskit.Aer.get_backend("""aer_simulator""" )
__lowerCamelCase = qiskit.QuantumCircuit(4 , 2 )
# encode inputs in qubits 0 and 1
if bita == 1:
qc_ha.x(0 )
if bita == 1:
qc_ha.x(1 )
qc_ha.barrier()
    # use CNOT gates to write the XOR of the inputs onto qubit 2
qc_ha.cx(0 , 2 )
qc_ha.cx(1 , 2 )
    # use a CCX (Toffoli) gate to write the AND of the inputs onto qubit 3
qc_ha.ccx(0 , 1 , 3 )
qc_ha.barrier()
# extract outputs
qc_ha.measure(2 , 0 ) # extract XOR value
qc_ha.measure(3 , 1 ) # extract AND value
# Execute the circuit on the qasm simulator
__lowerCamelCase = qiskit.execute(A__ , A__ , shots=1000 )
# Return the histogram data of the results of the experiment
return job.result().get_counts(A__ )
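# For inputs (1, 1) the sum (XOR) bit is 0 and the carry (AND) bit is 1, so
# every shot should land on the classical bitstring '10' (carry then sum, in
# Qiskit's right-to-left classical-bit ordering), i.e. counts == {'10': 1000}.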
if __name__ == "__main__":
UpperCAmelCase_ = half_adder(1, 1)
print(f"""Half Adder Output Qubit Counts: {counts}""")
| 29
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCAmelCase_ = {
'configuration_vision_encoder_decoder': ['VisionEncoderDecoderConfig', 'VisionEncoderDecoderOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ['VisionEncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ['TFVisionEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ['FlaxVisionEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 29
|
def lowerCamelCase__ ( A__ : int ):
'''simple docstring'''
__lowerCamelCase = [[0 for _ in range(A__ )] for _ in range(m + 1 )]
for i in range(m + 1 ):
__lowerCamelCase = 1
for n in range(m + 1 ):
for k in range(1 , A__ ):
memo[n][k] += memo[n][k - 1]
if n - k > 0:
memo[n][k] += memo[n - k - 1][k]
return memo[m][m - 1]
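# Worked example: partition(5) == 7, one count per integer partition of 5:
# 5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1, 1+1+1+1+1.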
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
UpperCAmelCase_ = int(input('Enter a number: ').strip())
print(partition(n))
except ValueError:
print('Please enter a number.')
else:
try:
UpperCAmelCase_ = int(sys.argv[1])
print(partition(n))
except ValueError:
print('Please pass a number.')
| 29
| 1
|
def lowerCamelCase__ ( A__ : int = 600851475143 ):
'''simple docstring'''
try:
__lowerCamelCase = int(A__ )
except (TypeError, ValueError):
raise TypeError("""Parameter n must be int or castable to int.""" )
if n <= 0:
raise ValueError("""Parameter n must be greater than or equal to one.""" )
__lowerCamelCase = 2
__lowerCamelCase = 0
if n == 2:
return 2
while n > 2:
while n % i != 0:
i += 1
__lowerCamelCase = i
while n % i == 0:
__lowerCamelCase = n // i
i += 1
return int(A__ )
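# e.g. the classic check: solution(13195) == 29, since 13195 = 5 * 7 * 13 * 29.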
if __name__ == "__main__":
print(f"""{solution() = }""")
| 29
|
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
UpperCAmelCase_ = logging.get_logger(__name__)
@add_end_docstrings(__lowerCamelCase)
class lowerCamelCase__( __lowerCamelCase):
def __init__( self: Tuple , *UpperCamelCase_: Dict , **UpperCamelCase_: Optional[int] ):
super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
requires_backends(self , """decord""" )
self.check_model_type(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: int=None , UpperCamelCase_: List[Any]=None , UpperCamelCase_: Optional[int]=None ):
__lowerCamelCase = {}
if frame_sampling_rate is not None:
__lowerCamelCase = frame_sampling_rate
if num_frames is not None:
__lowerCamelCase = num_frames
__lowerCamelCase = {}
if top_k is not None:
__lowerCamelCase = top_k
return preprocess_params, {}, postprocess_params
def __call__( self: Any , UpperCamelCase_: Union[str, List[str]] , **UpperCamelCase_: str ):
return super().__call__(UpperCamelCase_ , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: List[str]=None , UpperCamelCase_: List[Any]=1 ):
if num_frames is None:
__lowerCamelCase = self.model.config.num_frames
if video.startswith("""http://""" ) or video.startswith("""https://""" ):
__lowerCamelCase = BytesIO(requests.get(UpperCamelCase_ ).content )
__lowerCamelCase = VideoReader(UpperCamelCase_ )
videoreader.seek(0 )
__lowerCamelCase = 0
__lowerCamelCase = num_frames * frame_sampling_rate - 1
__lowerCamelCase = np.linspace(UpperCamelCase_ , UpperCamelCase_ , num=UpperCamelCase_ , dtype=np.intaa )
__lowerCamelCase = videoreader.get_batch(UpperCamelCase_ ).asnumpy()
__lowerCamelCase = list(UpperCamelCase_ )
__lowerCamelCase = self.image_processor(UpperCamelCase_ , return_tensors=self.framework )
return model_inputs
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: Any ):
__lowerCamelCase = self.model(**UpperCamelCase_ )
return model_outputs
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: List[Any] , UpperCamelCase_: Optional[int]=5 ):
if top_k > self.model.config.num_labels:
__lowerCamelCase = self.model.config.num_labels
if self.framework == "pt":
__lowerCamelCase = model_outputs.logits.softmax(-1 )[0]
__lowerCamelCase, __lowerCamelCase = probs.topk(UpperCamelCase_ )
else:
raise ValueError(F'Unsupported framework: {self.framework}' )
__lowerCamelCase = scores.tolist()
__lowerCamelCase = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(UpperCamelCase_ , UpperCamelCase_ )]
| 29
| 1
|
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {}
UpperCAmelCase_ = {}
UpperCAmelCase_ = {}
def lowerCamelCase__ ( A__ : type , A__ : Optional[str] , A__ : Optional[List[str]] = None , ):
'''simple docstring'''
__lowerCamelCase = aliases if aliases is not None else []
if format_type in _FORMAT_TYPES:
logger.warning(
f'Overwriting format type \'{format_type}\' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})' )
__lowerCamelCase = formatter_cls
for alias in set(aliases + [format_type] ):
if alias in _FORMAT_TYPES_ALIASES:
logger.warning(
f'Overwriting format type alias \'{alias}\' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})' )
__lowerCamelCase = format_type
def lowerCamelCase__ ( A__ : Exception , A__ : Optional[str] , A__ : Optional[List[str]] = None ):
'''simple docstring'''
__lowerCamelCase = aliases if aliases is not None else []
for alias in set(aliases + [format_type] ):
__lowerCamelCase = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['python'])
_register_formatter(ArrowFormatter, 'arrow', aliases=['pa', 'pyarrow'])
_register_formatter(NumpyFormatter, 'numpy', aliases=['np'])
_register_formatter(PandasFormatter, 'pandas', aliases=['pd'])
_register_formatter(CustomFormatter, 'custom')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, 'torch', aliases=['pt', 'pytorch'])
else:
UpperCAmelCase_ = ValueError('PyTorch needs to be installed to be able to return PyTorch tensors.')
_register_unavailable_formatter(_torch_error, 'torch', aliases=['pt', 'pytorch'])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, 'tensorflow', aliases=['tf'])
else:
UpperCAmelCase_ = ValueError('Tensorflow needs to be installed to be able to return Tensorflow tensors.')
_register_unavailable_formatter(_tf_error, 'tensorflow', aliases=['tf'])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, 'jax', aliases=[])
else:
UpperCAmelCase_ = ValueError('JAX needs to be installed to be able to return JAX arrays.')
_register_unavailable_formatter(_jax_error, 'jax', aliases=[])
def lowerCamelCase__ ( A__ : Optional[str] ):
'''simple docstring'''
if format_type in _FORMAT_TYPES_ALIASES:
return _FORMAT_TYPES_ALIASES[format_type]
else:
return format_type
def lowerCamelCase__ ( A__ : Optional[str] , **A__ : Dict ):
'''simple docstring'''
__lowerCamelCase = get_format_type_from_alias(A__ )
if format_type in _FORMAT_TYPES:
return _FORMAT_TYPES[format_type](**A__ )
if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
else:
raise ValueError(
f'Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got \'{format_type}\'' )
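# Usage sketch: aliases resolve before lookup, and a missing backend raises
# the ValueError registered for it above.
# get_formatter("np")      # NumpyFormatter instance
# get_formatter("pt")      # TorchFormatter, or raises if PyTorch is absent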
| 29
|
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
UpperCAmelCase_ = logging.get_logger(__name__)
@add_end_docstrings(__lowerCamelCase)
class lowerCamelCase__( __lowerCamelCase):
def __init__( self: List[Any] , *UpperCamelCase_: Dict , **UpperCamelCase_: Dict ):
super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
self.check_model_type(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: str=None , UpperCamelCase_: Optional[int]=None , UpperCamelCase_: Optional[int]=None , **UpperCamelCase_: List[Any] ):
__lowerCamelCase, __lowerCamelCase = {}, {}
if padding is not None:
__lowerCamelCase = padding
if truncation is not None:
__lowerCamelCase = truncation
if top_k is not None:
__lowerCamelCase = top_k
return preprocess_params, {}, postprocess_params
def __call__( self: Optional[Any] , UpperCamelCase_: Union["Image.Image", str] , UpperCamelCase_: str = None , **UpperCamelCase_: List[str] ):
if isinstance(UpperCamelCase_ , (Image.Image, str) ) and isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = {"""image""": image, """question""": question}
else:
__lowerCamelCase = image
__lowerCamelCase = super().__call__(UpperCamelCase_ , **UpperCamelCase_ )
return results
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: str , UpperCamelCase_: Any=False , UpperCamelCase_: Optional[int]=False ):
__lowerCamelCase = load_image(inputs["""image"""] )
__lowerCamelCase = self.tokenizer(
inputs["""question"""] , return_tensors=self.framework , padding=UpperCamelCase_ , truncation=UpperCamelCase_ )
__lowerCamelCase = self.image_processor(images=UpperCamelCase_ , return_tensors=self.framework )
model_inputs.update(UpperCamelCase_ )
return model_inputs
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: Tuple ):
__lowerCamelCase = self.model(**UpperCamelCase_ )
return model_outputs
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: Optional[int] , UpperCamelCase_: Optional[Any]=5 ):
if top_k > self.model.config.num_labels:
__lowerCamelCase = self.model.config.num_labels
if self.framework == "pt":
__lowerCamelCase = model_outputs.logits.sigmoid()[0]
__lowerCamelCase, __lowerCamelCase = probs.topk(UpperCamelCase_ )
else:
raise ValueError(F'Unsupported framework: {self.framework}' )
__lowerCamelCase = scores.tolist()
__lowerCamelCase = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(UpperCamelCase_ , UpperCamelCase_ )]
| 29
| 1
|
from graphs.minimum_spanning_tree_kruskal import kruskal
def lowerCamelCase__ ( ):
'''simple docstring'''
__lowerCamelCase = 9
__lowerCamelCase = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
__lowerCamelCase = kruskal(A__ , A__ )
__lowerCamelCase = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
assert sorted(A__ ) == sorted(A__ )
| 29
|
UpperCAmelCase_ = {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []}
UpperCAmelCase_ = ['a', 'b', 'c', 'd', 'e']
def lowerCamelCase__ ( A__ : Union[str, Any] , A__ : Optional[int] , A__ : str ):
'''simple docstring'''
__lowerCamelCase = start
# add current to visited
visited.append(A__ )
__lowerCamelCase = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
__lowerCamelCase = topological_sort(A__ , A__ , A__ )
# if all neighbors visited add current to sort
sort.append(A__ )
    # if not all vertices have been visited, pick an unvisited one and continue
if len(A__ ) != len(A__ ):
for vertice in vertices:
if vertice not in visited:
__lowerCamelCase = topological_sort(A__ , A__ , A__ )
# return sort
return sort
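# Worked example: topological_sort("a", [], []) on the graph above visits c,
# then b's children d and e, and returns ['c', 'd', 'e', 'b', 'a'], children
# before parents, i.e. a reverse topological ordering.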
if __name__ == "__main__":
UpperCAmelCase_ = topological_sort('a', [], [])
print(sort)
| 29
| 1
|
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : Optional[int] = IFInpaintingPipeline
UpperCAmelCase__ : int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
UpperCAmelCase__ : Dict = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
UpperCAmelCase__ : List[str] = PipelineTesterMixin.required_optional_params - {'latents'}
def lowerCAmelCase__ ( self: List[str] ):
return self._get_dummy_components()
def lowerCAmelCase__ ( self: int , UpperCamelCase_: Dict , UpperCamelCase_: str=0 ):
if str(UpperCamelCase_ ).startswith("""mps""" ):
__lowerCamelCase = torch.manual_seed(UpperCamelCase_ )
else:
__lowerCamelCase = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
__lowerCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
__lowerCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
__lowerCamelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def lowerCAmelCase__ ( self: Union[str, Any] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def lowerCAmelCase__ ( self: Union[str, Any] ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def lowerCAmelCase__ ( self: Optional[int] ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def lowerCAmelCase__ ( self: Any ):
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def lowerCAmelCase__ ( self: str ):
self._test_save_load_local()
def lowerCAmelCase__ ( self: str ):
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 29
|
import requests
from bsa import BeautifulSoup
def lowerCamelCase__ ( A__ : str = "https://www.worldometers.info/coronavirus" ):
'''simple docstring'''
__lowerCamelCase = BeautifulSoup(requests.get(A__ ).text , """html.parser""" )
__lowerCamelCase = soup.findAll("""h1""" )
__lowerCamelCase = soup.findAll("""div""" , {"""class""": """maincounter-number"""} )
keys += soup.findAll("""span""" , {"""class""": """panel-title"""} )
values += soup.findAll("""div""" , {"""class""": """number-table-main"""} )
return {key.text.strip(): value.text.strip() for key, value in zip(A__ , A__ )}
if __name__ == "__main__":
print('\033[1m' + 'COVID-19 Status of the World' + '\033[0m\n')
for key, value in world_covidaa_stats().items():
print(f"""{key}\n{value}\n""")
| 29
| 1
|
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
UpperCAmelCase_ = '\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n'
UpperCAmelCase_ = '\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n'
UpperCAmelCase_ = '\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for \'record\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'prediction_text\': the predicted answer text\n - for \'multirc\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question-answer pair as specified by the dataset\n - \'prediction\': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for \'record\': list of question-answers dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'answers\': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for \'record\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1\': F1 score\n - for \'multirc\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1_m\': Per-question macro-F1 score\n - \'f1_a\': Average F1 score over all answers\n - for \'axb\':\n \'matthews_correlation\': Matthew Correlation\n - for \'cb\':\n - \'accuracy\': Accuracy\n - \'f1\': F1 score\n - for all others:\n - \'accuracy\': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')\n >>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]\n >>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')\n >>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'
def lowerCamelCase__ ( A__ : Tuple , A__ : Dict ):
'''simple docstring'''
return float((preds == labels).mean() )
def lowerCamelCase__ ( A__ : Dict , A__ : Tuple , A__ : Union[str, Any]="binary" ):
'''simple docstring'''
__lowerCamelCase = simple_accuracy(A__ , A__ )
__lowerCamelCase = float(fa_score(y_true=A__ , y_pred=A__ , average=A__ ) )
return {
"accuracy": acc,
"f1": fa,
}
def lowerCamelCase__ ( A__ : str , A__ : int ):
'''simple docstring'''
__lowerCamelCase = {}
for id_pred, label in zip(A__ , A__ ):
__lowerCamelCase = f'{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'
__lowerCamelCase = id_pred["""prediction"""]
if question_id in question_map:
question_map[question_id].append((pred, label) )
else:
__lowerCamelCase = [(pred, label)]
__lowerCamelCase, __lowerCamelCase = [], []
for question, preds_labels in question_map.items():
__lowerCamelCase, __lowerCamelCase = zip(*A__ )
__lowerCamelCase = fa_score(y_true=A__ , y_pred=A__ , average="""macro""" )
fas.append(A__ )
__lowerCamelCase = int(sum(pred == label for pred, label in preds_labels ) == len(A__ ) )
ems.append(A__ )
__lowerCamelCase = float(sum(A__ ) / len(A__ ) )
__lowerCamelCase = sum(A__ ) / len(A__ )
__lowerCamelCase = float(fa_score(y_true=A__ , y_pred=[id_pred["""prediction"""] for id_pred in ids_preds] ) )
return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class lowerCamelCase__( datasets.Metric):
def lowerCAmelCase__ ( self: Any ):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" if not self.config_name == """record""" and not self.config_name == """multirc""" else None , )
def lowerCAmelCase__ ( self: List[str] ):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"prediction_text": datasets.Value("""string""" ),
},
"references": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"answers": datasets.Sequence(datasets.Value("""string""" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("""int64""" ),
"paragraph": datasets.Value("""int64""" ),
"question": datasets.Value("""int64""" ),
},
"prediction": datasets.Value("""int64""" ),
},
"references": datasets.Value("""int64""" ),
}
else:
return {
"predictions": datasets.Value("""int64""" ),
"references": datasets.Value("""int64""" ),
}
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: Dict , UpperCamelCase_: int ):
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(UpperCamelCase_ , UpperCamelCase_ )}
elif self.config_name == "cb":
return acc_and_fa(UpperCamelCase_ , UpperCamelCase_ , fa_avg="""macro""" )
elif self.config_name == "record":
__lowerCamelCase = [
{
"""qas""": [
{"""id""": ref["""idx"""]["""query"""], """answers""": [{"""text""": ans} for ans in ref["""answers"""]]}
for ref in references
]
}
]
__lowerCamelCase = {pred["""idx"""]["""query"""]: pred["""prediction_text"""] for pred in predictions}
return evaluate_record(UpperCamelCase_ , UpperCamelCase_ )[0]
elif self.config_name == "multirc":
return evaluate_multirc(UpperCamelCase_ , UpperCamelCase_ )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(UpperCamelCase_ , UpperCamelCase_ )}
else:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
| 29
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Union[str, Any] = 'yolos'
def __init__( self: Dict , UpperCamelCase_: List[Any]=7_68 , UpperCamelCase_: Tuple=12 , UpperCamelCase_: int=12 , UpperCamelCase_: int=30_72 , UpperCamelCase_: List[str]="gelu" , UpperCamelCase_: Union[str, Any]=0.0 , UpperCamelCase_: int=0.0 , UpperCamelCase_: Optional[int]=0.02 , UpperCamelCase_: Dict=1E-12 , UpperCamelCase_: List[Any]=[5_12, 8_64] , UpperCamelCase_: Optional[int]=16 , UpperCamelCase_: Any=3 , UpperCamelCase_: Union[str, Any]=True , UpperCamelCase_: List[str]=1_00 , UpperCamelCase_: List[str]=True , UpperCamelCase_: Any=False , UpperCamelCase_: Optional[Any]=1 , UpperCamelCase_: Any=5 , UpperCamelCase_: Any=2 , UpperCamelCase_: Tuple=5 , UpperCamelCase_: str=2 , UpperCamelCase_: Any=0.1 , **UpperCamelCase_: Any , ):
super().__init__(**UpperCamelCase_ )
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = initializer_range
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = image_size
__lowerCamelCase = patch_size
__lowerCamelCase = num_channels
__lowerCamelCase = qkv_bias
__lowerCamelCase = num_detection_tokens
__lowerCamelCase = use_mid_position_embeddings
__lowerCamelCase = auxiliary_loss
# Hungarian matcher
__lowerCamelCase = class_cost
__lowerCamelCase = bbox_cost
__lowerCamelCase = giou_cost
# Loss coefficients
__lowerCamelCase = bbox_loss_coefficient
__lowerCamelCase = giou_loss_coefficient
__lowerCamelCase = eos_coefficient
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Tuple = version.parse('1.11')
@property
def lowerCAmelCase__ ( self: Any ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCAmelCase__ ( self: Dict ):
return 1E-4
@property
def lowerCAmelCase__ ( self: Dict ):
return 12
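# Back-of-the-envelope check of the sequence length implied by the defaults
# above, assuming standard ViT patching plus the detection tokens (the values
# are the constructor defaults, not read from a checkpoint):
image_height, image_width, patch_size = 512, 864, 16
num_patches = (image_height // patch_size) * (image_width // patch_size)  # 32 * 54 = 1728
sequence_length = num_patches + 1 + 100  # patches + [CLS] + detection tokens
print(sequence_length)  # 1829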
| 29
| 1
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {'vocab_file': 'vocab.txt'}
UpperCAmelCase_ = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
UpperCAmelCase_ = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
UpperCAmelCase_ = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : int = VOCAB_FILES_NAMES
UpperCAmelCase__ : int = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : int = PRETRAINED_INIT_CONFIGURATION
UpperCAmelCase__ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : Dict = ConvBertTokenizer
def __init__( self: Dict , UpperCamelCase_: Tuple=None , UpperCamelCase_: List[str]=None , UpperCamelCase_: Optional[int]=True , UpperCamelCase_: List[Any]="[UNK]" , UpperCamelCase_: Any="[SEP]" , UpperCamelCase_: List[Any]="[PAD]" , UpperCamelCase_: Dict="[CLS]" , UpperCamelCase_: List[Any]="[MASK]" , UpperCamelCase_: Dict=True , UpperCamelCase_: List[str]=None , **UpperCamelCase_: Optional[Any] , ):
super().__init__(
UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , do_lower_case=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , tokenize_chinese_chars=UpperCamelCase_ , strip_accents=UpperCamelCase_ , **UpperCamelCase_ , )
__lowerCamelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , UpperCamelCase_ ) != do_lower_case
or normalizer_state.get("""strip_accents""" , UpperCamelCase_ ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , UpperCamelCase_ ) != tokenize_chinese_chars
):
__lowerCamelCase = getattr(UpperCamelCase_ , normalizer_state.pop("""type""" ) )
__lowerCamelCase = do_lower_case
__lowerCamelCase = strip_accents
__lowerCamelCase = tokenize_chinese_chars
__lowerCamelCase = normalizer_class(**UpperCamelCase_ )
__lowerCamelCase = do_lower_case
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: str , UpperCamelCase_: str=None ):
__lowerCamelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowerCAmelCase__ ( self: Any , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None ):
__lowerCamelCase = [self.sep_token_id]
__lowerCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: str , UpperCamelCase_: Optional[str] = None ):
__lowerCamelCase = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_ )
return tuple(UpperCamelCase_ )
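# Pure-Python illustration of the token-type-id layout built above:
# [CLS] A ... [SEP] is segment 0, B ... [SEP] is segment 1 (ids are made up):
token_ids_a, token_ids_b = [7, 8, 9], [10, 11]
cls, sep = [101], [102]
print(len(cls + token_ids_a + sep) * [0] + len(token_ids_b + sep) * [1])
# [0, 0, 0, 0, 0, 1, 1, 1]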
| 29
|
import os
from math import logaa
def lowerCamelCase__ ( A__ : str = "base_exp.txt" ):
'''simple docstring'''
__lowerCamelCase = 0
__lowerCamelCase = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(A__ ) , A__ ) ) ):
__lowerCamelCase, __lowerCamelCase = list(map(A__ , line.split(""",""" ) ) )
if x * logaa(A__ ) > largest:
__lowerCamelCase = x * logaa(A__ )
__lowerCamelCase = i + 1
return result
if __name__ == "__main__":
print(solution())
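    # Why comparing x * log10(base) works: log10 is monotonic, so it preserves
    # the ordering of base**x without ever computing the huge powers:
    from math import log10
    assert (11 * log10(2) > 7 * log10(3)) == (2**11 > 3**7)  # 2048 > 2187 is False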
| 29
| 1
|
from __future__ import annotations
UpperCAmelCase_ = list[tuple[int, int]]
UpperCAmelCase_ = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
UpperCAmelCase_ = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right
class lowerCamelCase__:
def __init__( self: Any , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: float , UpperCamelCase_: Node | None , ):
__lowerCamelCase = pos_x
__lowerCamelCase = pos_y
__lowerCamelCase = (pos_y, pos_x)
__lowerCamelCase = goal_x
__lowerCamelCase = goal_y
__lowerCamelCase = g_cost
__lowerCamelCase = parent
__lowerCamelCase = self.calculate_heuristic()
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = abs(self.pos_x - self.goal_x )
__lowerCamelCase = abs(self.pos_y - self.goal_y )
return dx + dy
def __lt__( self: Union[str, Any] , UpperCamelCase_: Dict ):
return self.f_cost < other.f_cost
class lowerCamelCase__:
def __init__( self: Union[str, Any] , UpperCamelCase_: tuple[int, int] , UpperCamelCase_: tuple[int, int] ):
__lowerCamelCase = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , UpperCamelCase_ )
__lowerCamelCase = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_99_99 , UpperCamelCase_ )
__lowerCamelCase = [self.start]
__lowerCamelCase = []
__lowerCamelCase = False
def lowerCAmelCase__ ( self: Any ):
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
__lowerCamelCase = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
__lowerCamelCase = True
return self.retrace_path(UpperCamelCase_ )
self.closed_nodes.append(UpperCamelCase_ )
__lowerCamelCase = self.get_successors(UpperCamelCase_ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(UpperCamelCase_ )
else:
# retrieve the best current path
__lowerCamelCase = self.open_nodes.pop(self.open_nodes.index(UpperCamelCase_ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(UpperCamelCase_ )
else:
self.open_nodes.append(UpperCamelCase_ )
if not self.reached:
return [self.start.pos]
return None
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: Node ):
__lowerCamelCase = []
for action in delta:
__lowerCamelCase = parent.pos_x + action[1]
__lowerCamelCase = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(UpperCamelCase_ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
UpperCamelCase_ , UpperCamelCase_ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , UpperCamelCase_ , ) )
return successors
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: Node | None ):
__lowerCamelCase = node
__lowerCamelCase = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
__lowerCamelCase = current_node.parent
path.reverse()
return path
if __name__ == "__main__":
UpperCAmelCase_ = (0, 0)
UpperCAmelCase_ = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print('------')
UpperCAmelCase_ = GreedyBestFirst(init, goal)
UpperCAmelCase_ = greedy_bf.search()
if path:
for pos_x, pos_y in path:
UpperCAmelCase_ = 2
for elem in grid:
print(elem)
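    # The heuristic above is plain Manhattan distance; a standalone check using
    # the corners of the 7x7 grid (values hard-coded for illustration):
    print(abs(0 - 6) + abs(0 - 6))  # 12: lower bound on path length ignoring walls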
| 29
|
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def lowerCamelCase__ ( A__ : Tuple , A__ : Optional[int]=0.999 , A__ : Any="cosine" , ):
'''simple docstring'''
if alpha_transform_type == "cosine":
def alpha_bar_fn(A__ : Any ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(A__ : Optional[int] ):
return math.exp(t * -12.0 )
else:
        raise ValueError(f'Unsupported alpha_transform_type: {alpha_transform_type}' )
__lowerCamelCase = []
for i in range(A__ ):
__lowerCamelCase = i / num_diffusion_timesteps
__lowerCamelCase = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(A__ ) / alpha_bar_fn(A__ ) , A__ ) )
return torch.tensor(A__ , dtype=torch.floataa )
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase):
UpperCAmelCase__ : List[str] = [e.name for e in KarrasDiffusionSchedulers]
UpperCAmelCase__ : Any = 2
@register_to_config
def __init__( self: List[str] , UpperCamelCase_: int = 10_00 , UpperCamelCase_: float = 0.0_0085 , UpperCamelCase_: float = 0.012 , UpperCamelCase_: str = "linear" , UpperCamelCase_: Optional[Union[np.ndarray, List[float]]] = None , UpperCamelCase_: str = "epsilon" , UpperCamelCase_: str = "linspace" , UpperCamelCase_: int = 0 , ):
if trained_betas is not None:
__lowerCamelCase = torch.tensor(UpperCamelCase_ , dtype=torch.floataa )
elif beta_schedule == "linear":
__lowerCamelCase = torch.linspace(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__lowerCamelCase = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , UpperCamelCase_ , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__lowerCamelCase = betas_for_alpha_bar(UpperCamelCase_ )
else:
            raise NotImplementedError(F'{beta_schedule} is not implemented for {self.__class__}' )
__lowerCamelCase = 1.0 - self.betas
__lowerCamelCase = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: int , UpperCamelCase_: List[str] , UpperCamelCase_: Optional[Any]=None ):
if schedule_timesteps is None:
__lowerCamelCase = self.timesteps
__lowerCamelCase = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
__lowerCamelCase = 1 if len(UpperCamelCase_ ) > 1 else 0
else:
__lowerCamelCase = timestep.cpu().item() if torch.is_tensor(UpperCamelCase_ ) else timestep
__lowerCamelCase = self._index_counter[timestep_int]
return indices[pos].item()
@property
def lowerCAmelCase__ ( self: Optional[int] ):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: Union[float, torch.FloatTensor] , ):
__lowerCamelCase = self.index_for_timestep(UpperCamelCase_ )
if self.state_in_first_order:
__lowerCamelCase = self.sigmas[step_index]
else:
__lowerCamelCase = self.sigmas_interpol[step_index]
__lowerCamelCase = sample / ((sigma**2 + 1) ** 0.5)
return sample
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: int , UpperCamelCase_: Union[str, torch.device] = None , UpperCamelCase_: Optional[int] = None , ):
__lowerCamelCase = num_inference_steps
__lowerCamelCase = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
__lowerCamelCase = np.linspace(0 , num_train_timesteps - 1 , UpperCamelCase_ , dtype=UpperCamelCase_ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
__lowerCamelCase = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__lowerCamelCase = (np.arange(0 , UpperCamelCase_ ) * step_ratio).round()[::-1].copy().astype(UpperCamelCase_ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
__lowerCamelCase = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__lowerCamelCase = (np.arange(UpperCamelCase_ , 0 , -step_ratio )).round().copy().astype(UpperCamelCase_ )
timesteps -= 1
else:
raise ValueError(
F'{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.' )
__lowerCamelCase = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
__lowerCamelCase = torch.from_numpy(np.log(UpperCamelCase_ ) ).to(UpperCamelCase_ )
__lowerCamelCase = np.interp(UpperCamelCase_ , np.arange(0 , len(UpperCamelCase_ ) ) , UpperCamelCase_ )
__lowerCamelCase = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
__lowerCamelCase = torch.from_numpy(UpperCamelCase_ ).to(device=UpperCamelCase_ )
# interpolate sigmas
__lowerCamelCase = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
__lowerCamelCase = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
__lowerCamelCase = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(UpperCamelCase_ ).startswith("""mps""" ):
# mps does not support float64
__lowerCamelCase = torch.from_numpy(UpperCamelCase_ ).to(UpperCamelCase_ , dtype=torch.floataa )
else:
__lowerCamelCase = torch.from_numpy(UpperCamelCase_ ).to(UpperCamelCase_ )
# interpolate timesteps
__lowerCamelCase = self.sigma_to_t(UpperCamelCase_ ).to(UpperCamelCase_ , dtype=timesteps.dtype )
__lowerCamelCase = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
__lowerCamelCase = torch.cat([timesteps[:1], interleaved_timesteps] )
__lowerCamelCase = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
__lowerCamelCase = defaultdict(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: str ):
# get log sigma
__lowerCamelCase = sigma.log()
# get distribution
__lowerCamelCase = log_sigma - self.log_sigmas[:, None]
# get sigmas range
__lowerCamelCase = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
__lowerCamelCase = low_idx + 1
__lowerCamelCase = self.log_sigmas[low_idx]
__lowerCamelCase = self.log_sigmas[high_idx]
# interpolate sigmas
__lowerCamelCase = (low - log_sigma) / (low - high)
__lowerCamelCase = w.clamp(0 , 1 )
# transform interpolation to time range
__lowerCamelCase = (1 - w) * low_idx + w * high_idx
__lowerCamelCase = t.view(sigma.shape )
return t
@property
def lowerCAmelCase__ ( self: Dict ):
return self.sample is None
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Union[torch.FloatTensor, np.ndarray] , UpperCamelCase_: Union[float, torch.FloatTensor] , UpperCamelCase_: Union[torch.FloatTensor, np.ndarray] , UpperCamelCase_: bool = True , ):
__lowerCamelCase = self.index_for_timestep(UpperCamelCase_ )
# advance index counter by 1
__lowerCamelCase = timestep.cpu().item() if torch.is_tensor(UpperCamelCase_ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
__lowerCamelCase = self.sigmas[step_index]
__lowerCamelCase = self.sigmas_interpol[step_index + 1]
__lowerCamelCase = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
__lowerCamelCase = self.sigmas[step_index - 1]
__lowerCamelCase = self.sigmas_interpol[step_index]
__lowerCamelCase = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
__lowerCamelCase = 0
__lowerCamelCase = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
__lowerCamelCase = sigma_hat if self.state_in_first_order else sigma_interpol
__lowerCamelCase = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
__lowerCamelCase = sigma_hat if self.state_in_first_order else sigma_interpol
__lowerCamelCase = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError("""prediction_type not implemented yet: sample""" )
else:
raise ValueError(
F'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`' )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
__lowerCamelCase = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
__lowerCamelCase = sigma_interpol - sigma_hat
# store for 2nd order step
__lowerCamelCase = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
__lowerCamelCase = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
__lowerCamelCase = sigma_next - sigma_hat
__lowerCamelCase = self.sample
__lowerCamelCase = None
__lowerCamelCase = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: torch.FloatTensor , ):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
__lowerCamelCase = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(UpperCamelCase_ ):
# mps does not support float64
__lowerCamelCase = self.timesteps.to(original_samples.device , dtype=torch.floataa )
__lowerCamelCase = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
__lowerCamelCase = self.timesteps.to(original_samples.device )
__lowerCamelCase = timesteps.to(original_samples.device )
__lowerCamelCase = [self.index_for_timestep(UpperCamelCase_ , UpperCamelCase_ ) for t in timesteps]
__lowerCamelCase = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
__lowerCamelCase = sigma.unsqueeze(-1 )
__lowerCamelCase = original_samples + noise * sigma
return noisy_samples
def __len__( self: Tuple ):
return self.config.num_train_timesteps
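# The sigma interpolation above (`sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp()`)
# is a geometric mean of neighbouring sigmas. A small numeric check of that
# identity (assumes torch; the values are illustrative):
import torch

sigmas = torch.tensor([8.0, 4.0, 2.0, 1.0])
interp = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp()
assert torch.allclose(interp[1:], (sigmas[1:] * sigmas[:-1]).sqrt())
print(interp)  # element 0 wraps around to pair with the last sigma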
| 29
| 1
|
from string import ascii_uppercase
UpperCAmelCase_ = {char: i for i, char in enumerate(ascii_uppercase)}
UpperCAmelCase_ = dict(enumerate(ascii_uppercase))
def lowerCamelCase__ ( A__ : str , A__ : str ):
'''simple docstring'''
__lowerCamelCase = len(A__ )
__lowerCamelCase = 0
while True:
if x == i:
__lowerCamelCase = 0
if len(A__ ) == len(A__ ):
break
key += key[i]
i += 1
return key
def lowerCamelCase__ ( A__ : str , A__ : str ):
'''simple docstring'''
__lowerCamelCase = """"""
__lowerCamelCase = 0
for letter in message:
if letter == " ":
cipher_text += " "
else:
__lowerCamelCase = (dicta[letter] - dicta[key_new[i]]) % 26
i += 1
cipher_text += dicta[x]
return cipher_text
def lowerCamelCase__ ( A__ : str , A__ : str ):
'''simple docstring'''
__lowerCamelCase = """"""
__lowerCamelCase = 0
for letter in cipher_text:
if letter == " ":
or_txt += " "
else:
__lowerCamelCase = (dicta[letter] + dicta[key_new[i]] + 26) % 26
i += 1
or_txt += dicta[x]
return or_txt
def lowerCamelCase__ ( ):
'''simple docstring'''
__lowerCamelCase = """THE GERMAN ATTACK"""
__lowerCamelCase = """SECRET"""
__lowerCamelCase = generate_key(A__ , A__ )
__lowerCamelCase = cipher_text(A__ , A__ )
print(f'Encrypted Text = {s}' )
print(f'Original Text = {original_text(A__ , A__ )}' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
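    # Self-contained round-trip check of the same running-key idea (simplified:
    # spaces consume a key letter here, whereas the code above skips them, but
    # encryption and decryption stay aligned either way):
    from string import ascii_uppercase as ABC

    def demo_round_trip(message: str, key: str) -> bool:
        full_key = (key * (len(message) // len(key) + 1))[: len(message)]
        encrypted = "".join(
            c if c == " " else ABC[(ABC.index(c) - ABC.index(k)) % 26]
            for c, k in zip(message, full_key)
        )
        decrypted = "".join(
            c if c == " " else ABC[(ABC.index(c) + ABC.index(k)) % 26]
            for c, k in zip(encrypted, full_key)
        )
        return decrypted == message

    print(demo_round_trip("THE GERMAN ATTACK", "SECRET"))  # True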
| 29
|
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : Union[str, Any] = IFImgaImgSuperResolutionPipeline
UpperCAmelCase__ : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'width', 'height'}
UpperCAmelCase__ : Dict = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'original_image'})
UpperCAmelCase__ : Tuple = PipelineTesterMixin.required_optional_params - {'latents'}
def lowerCAmelCase__ ( self: Optional[int] ):
return self._get_superresolution_dummy_components()
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: Any , UpperCamelCase_: Dict=0 ):
if str(UpperCamelCase_ ).startswith("""mps""" ):
__lowerCamelCase = torch.manual_seed(UpperCamelCase_ )
else:
__lowerCamelCase = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
__lowerCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
__lowerCamelCase = floats_tensor((1, 3, 16, 16) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
__lowerCamelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""original_image""": original_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def lowerCAmelCase__ ( self: Dict ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def lowerCAmelCase__ ( self: int ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def lowerCAmelCase__ ( self: Optional[Any] ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def lowerCAmelCase__ ( self: Optional[Any] ):
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def lowerCAmelCase__ ( self: List[str] ):
self._test_save_load_local()
def lowerCAmelCase__ ( self: List[Any] ):
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 29
| 1
|
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'huggingface/autoformer-tourism-monthly': 'https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json',
}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : int = 'autoformer'
UpperCAmelCase__ : int = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
'num_hidden_layers': 'encoder_layers',
}
def __init__( self: Optional[int] , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: str = "student_t" , UpperCamelCase_: str = "nll" , UpperCamelCase_: int = 1 , UpperCamelCase_: List[int] = [1, 2, 3, 4, 5, 6, 7] , UpperCamelCase_: bool = True , UpperCamelCase_: int = 0 , UpperCamelCase_: int = 0 , UpperCamelCase_: int = 0 , UpperCamelCase_: int = 0 , UpperCamelCase_: Optional[List[int]] = None , UpperCamelCase_: Optional[List[int]] = None , UpperCamelCase_: int = 64 , UpperCamelCase_: int = 2 , UpperCamelCase_: int = 2 , UpperCamelCase_: int = 2 , UpperCamelCase_: int = 2 , UpperCamelCase_: int = 32 , UpperCamelCase_: int = 32 , UpperCamelCase_: str = "gelu" , UpperCamelCase_: float = 0.1 , UpperCamelCase_: float = 0.1 , UpperCamelCase_: float = 0.1 , UpperCamelCase_: float = 0.1 , UpperCamelCase_: float = 0.1 , UpperCamelCase_: int = 1_00 , UpperCamelCase_: float = 0.02 , UpperCamelCase_: bool = True , UpperCamelCase_: List[Any]=True , UpperCamelCase_: int = 10 , UpperCamelCase_: int = 25 , UpperCamelCase_: int = 3 , **UpperCamelCase_: str , ):
# time series specific configuration
__lowerCamelCase = prediction_length
__lowerCamelCase = context_length if context_length is not None else prediction_length
__lowerCamelCase = distribution_output
__lowerCamelCase = loss
__lowerCamelCase = input_size
__lowerCamelCase = num_time_features
__lowerCamelCase = lags_sequence
__lowerCamelCase = scaling
__lowerCamelCase = num_dynamic_real_features
__lowerCamelCase = num_static_real_features
__lowerCamelCase = num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(UpperCamelCase_ ) != num_static_categorical_features:
raise ValueError(
"""The cardinality should be a list of the same length as `num_static_categorical_features`""" )
__lowerCamelCase = cardinality
else:
__lowerCamelCase = [0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(UpperCamelCase_ ) != num_static_categorical_features:
raise ValueError(
"""The embedding dimension should be a list of the same length as `num_static_categorical_features`""" )
__lowerCamelCase = embedding_dimension
else:
__lowerCamelCase = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
__lowerCamelCase = num_parallel_samples
# Transformer architecture configuration
__lowerCamelCase = input_size * len(self.lags_sequence ) + self._number_of_features
__lowerCamelCase = d_model
__lowerCamelCase = encoder_attention_heads
__lowerCamelCase = decoder_attention_heads
__lowerCamelCase = encoder_ffn_dim
__lowerCamelCase = decoder_ffn_dim
__lowerCamelCase = encoder_layers
__lowerCamelCase = decoder_layers
__lowerCamelCase = dropout
__lowerCamelCase = attention_dropout
__lowerCamelCase = activation_dropout
__lowerCamelCase = encoder_layerdrop
__lowerCamelCase = decoder_layerdrop
__lowerCamelCase = activation_function
__lowerCamelCase = init_std
__lowerCamelCase = use_cache
# Autoformer
__lowerCamelCase = label_length
__lowerCamelCase = moving_average
__lowerCamelCase = autocorrelation_factor
super().__init__(is_encoder_decoder=UpperCamelCase_ , **UpperCamelCase_ )
@property
def lowerCAmelCase__ ( self: Tuple ):
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 29
|
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def lowerCamelCase__ ( A__ : Tuple ):
'''simple docstring'''
__lowerCamelCase = [False] * len(A__ )
__lowerCamelCase = [-1] * len(A__ )
def dfs(A__ : Optional[int] , A__ : Optional[int] ):
__lowerCamelCase = True
__lowerCamelCase = c
for u in graph[v]:
if not visited[u]:
dfs(A__ , 1 - c )
for i in range(len(A__ ) ):
if not visited[i]:
dfs(A__ , 0 )
for i in range(len(A__ ) ):
for j in graph[i]:
if color[i] == color[j]:
return False
return True
# Adjacency list of graph
UpperCAmelCase_ = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
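# An equivalent BFS two-colouring check, shown on an odd cycle that is *not*
# bipartite (self-contained reimplementation for illustration):
from collections import deque

def is_bipartite_bfs(adj):
    color = {}
    for start in adj:
        if start in color:
            continue
        color[start] = 0
        queue = deque([start])
        while queue:
            v = queue.popleft()
            for u in adj[v]:
                if u not in color:
                    color[u] = 1 - color[v]
                    queue.append(u)
                elif color[u] == color[v]:
                    return False
    return True

print(is_bipartite_bfs({0: [1, 2], 1: [0, 2], 2: [0, 1]}))  # False (triangle)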
| 29
| 1
|
def lowerCamelCase__ ( A__ : list[int] ):
'''simple docstring'''
if not numbers:
return 0
if not isinstance(A__ , (list, tuple) ) or not all(
isinstance(A__ , A__ ) for number in numbers ):
raise ValueError("""numbers must be an iterable of integers""" )
__lowerCamelCase = __lowerCamelCase = __lowerCamelCase = numbers[0]
for i in range(1 , len(A__ ) ):
# update the maximum and minimum subarray products
__lowerCamelCase = numbers[i]
if number < 0:
__lowerCamelCase, __lowerCamelCase = min_till_now, max_till_now
__lowerCamelCase = max(A__ , max_till_now * number )
__lowerCamelCase = min(A__ , min_till_now * number )
# update the maximum product found till now
__lowerCamelCase = max(A__ , A__ )
return max_prod
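# Brute-force cross-check of the linear-time routine above on a classic
# example; the O(n^2) version simply enumerates every subarray:
from math import prod

nums = [2, 3, -2, 4]
print(max(prod(nums[i:j]) for i in range(len(nums)) for j in range(i + 1, len(nums) + 1)))
# 6, from the subarray [2, 3]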
| 29
|
from __future__ import annotations
UpperCAmelCase_ = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class lowerCamelCase__:
def __init__( self: Tuple , UpperCamelCase_: dict[str, list[str]] , UpperCamelCase_: str ):
__lowerCamelCase = graph
# mapping node to its parent in resulting breadth first tree
__lowerCamelCase = {}
__lowerCamelCase = source_vertex
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = {self.source_vertex}
__lowerCamelCase = None
__lowerCamelCase = [self.source_vertex] # first in first out queue
while queue:
__lowerCamelCase = queue.pop(0 )
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(UpperCamelCase_ )
__lowerCamelCase = vertex
queue.append(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: str ):
if target_vertex == self.source_vertex:
return self.source_vertex
__lowerCamelCase = self.parent.get(UpperCamelCase_ )
if target_vertex_parent is None:
__lowerCamelCase = (
F'No path from vertex: {self.source_vertex} to vertex: {target_vertex}'
)
raise ValueError(UpperCamelCase_ )
return self.shortest_path(UpperCamelCase_ ) + F'->{target_vertex}'
if __name__ == "__main__":
UpperCAmelCase_ = Graph(graph, 'G')
g.breath_first_search()
print(g.shortest_path('D'))
print(g.shortest_path('G'))
print(g.shortest_path('Foo'))
| 29
| 1
|
def lowerCamelCase__ ( A__ : dict ):
'''simple docstring'''
__lowerCamelCase = set()
# To detect a back edge, keep track of vertices currently in the recursion stack
__lowerCamelCase = set()
return any(
node not in visited and depth_first_search(A__ , A__ , A__ , A__ )
for node in graph )
def lowerCamelCase__ ( A__ : dict , A__ : int , A__ : set , A__ : set ):
'''simple docstring'''
visited.add(A__ )
rec_stk.add(A__ )
for node in graph[vertex]:
if node not in visited:
if depth_first_search(A__ , A__ , A__ , A__ ):
return True
elif node in rec_stk:
return True
# The node needs to be removed from recursion stack before function ends
rec_stk.remove(A__ )
return False
if __name__ == "__main__":
from doctest import testmod
testmod()
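    # Standalone usage sketch, since the docstrings above carry no doctests
    # (self-contained reimplementation for illustration):
    def has_cycle(g: dict) -> bool:
        seen, stack = set(), set()

        def dfs(v):
            seen.add(v)
            stack.add(v)
            for u in g[v]:
                if u in stack or (u not in seen and dfs(u)):
                    return True
            stack.discard(v)
            return False

        return any(dfs(v) for v in g if v not in seen)

    print(has_cycle({0: [1], 1: [2], 2: [0]}))  # True: 0 -> 1 -> 2 -> 0
    print(has_cycle({0: [1], 1: [2], 2: []}))   # False: acyclic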
| 29
|
from math import ceil, sqrt
def lowerCamelCase__ ( A__ : int = 1000000 ):
'''simple docstring'''
__lowerCamelCase = 0
for outer_width in range(3 , (limit // 4) + 2 ):
if outer_width**2 > limit:
__lowerCamelCase = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
else:
__lowerCamelCase = 1
if (outer_width - hole_width_lower_bound) % 2:
hole_width_lower_bound += 1
answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
return answer
if __name__ == "__main__":
print(f"""{solution() = }""")
| 29
| 1
|
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def lowerCamelCase__ ( A__ : str , A__ : Union[str, Any]=None ):
'''simple docstring'''
__lowerCamelCase = None
if token is not None:
__lowerCamelCase = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'Bearer {token}'}
__lowerCamelCase = f'https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'
__lowerCamelCase = requests.get(A__ , headers=A__ ).json()
__lowerCamelCase = {}
try:
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
__lowerCamelCase = math.ceil((result["""total_count"""] - 100) / 100 )
for i in range(A__ ):
__lowerCamelCase = requests.get(url + f'&page={i + 2}' , headers=A__ ).json()
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return job_links
except Exception:
print(f'Unknown error, could not fetch links:\n{traceback.format_exc()}' )
return {}
def lowerCamelCase__ ( A__ : Tuple , A__ : Optional[int]=None ):
'''simple docstring'''
__lowerCamelCase = None
if token is not None:
__lowerCamelCase = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'Bearer {token}'}
    __lowerCamelCase = f'https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100'
__lowerCamelCase = requests.get(A__ , headers=A__ ).json()
__lowerCamelCase = {}
try:
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
__lowerCamelCase = math.ceil((result["""total_count"""] - 100) / 100 )
for i in range(A__ ):
__lowerCamelCase = requests.get(url + f'&page={i + 2}' , headers=A__ ).json()
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
return artifacts
except Exception:
print(f'Unknown error, could not fetch links:\n{traceback.format_exc()}' )
return {}
def lowerCamelCase__ ( A__ : List[Any] , A__ : int , A__ : Optional[Any] , A__ : List[Any] ):
'''simple docstring'''
__lowerCamelCase = None
if token is not None:
__lowerCamelCase = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'Bearer {token}'}
__lowerCamelCase = requests.get(A__ , headers=A__ , allow_redirects=A__ )
__lowerCamelCase = result.headers["""Location"""]
__lowerCamelCase = requests.get(A__ , allow_redirects=A__ )
__lowerCamelCase = os.path.join(A__ , f'{artifact_name}.zip' )
with open(A__ , """wb""" ) as fp:
fp.write(response.content )
def lowerCamelCase__ ( A__ : int , A__ : List[str]=None ):
'''simple docstring'''
__lowerCamelCase = []
__lowerCamelCase = []
__lowerCamelCase = None
with zipfile.ZipFile(A__ ) as z:
for filename in z.namelist():
if not os.path.isdir(A__ ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(A__ ) as f:
for line in f:
__lowerCamelCase = line.decode("""UTF-8""" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
__lowerCamelCase = line[: line.index(""": """ )]
__lowerCamelCase = line[line.index(""": """ ) + len(""": """ ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("""FAILED """ ):
# `test` is the test method that failed
__lowerCamelCase = line[len("""FAILED """ ) :]
failed_tests.append(A__ )
elif filename == "job_name.txt":
__lowerCamelCase = line
if len(A__ ) != len(A__ ):
raise ValueError(
f'`errors` and `failed_tests` should have the same number of elements. Got {len(A__ )} for `errors` '
f'and {len(A__ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some'
""" problem.""" )
__lowerCamelCase = None
if job_name and job_links:
__lowerCamelCase = job_links.get(A__ , A__ )
# A list with elements of the form (line of error, error, failed test)
__lowerCamelCase = [x + [y] + [job_link] for x, y in zip(A__ , A__ )]
return result
def lowerCamelCase__ ( A__ : List[str] , A__ : str=None ):
'''simple docstring'''
__lowerCamelCase = []
__lowerCamelCase = [os.path.join(A__ , A__ ) for p in os.listdir(A__ ) if p.endswith(""".zip""" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(A__ , job_links=A__ ) )
return errors
def lowerCamelCase__ ( A__ : Tuple , A__ : Optional[int]=None ):
'''simple docstring'''
__lowerCamelCase = Counter()
counter.update([x[1] for x in logs] )
__lowerCamelCase = counter.most_common()
__lowerCamelCase = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
__lowerCamelCase = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
__lowerCamelCase = dict(sorted(r.items() , key=lambda A__ : item[1]["count"] , reverse=A__ ) )
return r
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
__lowerCamelCase = test.split("""::""" )[0]
if test.startswith("""tests/models/""" ):
__lowerCamelCase = test.split("""/""" )[2]
else:
__lowerCamelCase = None
return test
def lowerCamelCase__ ( A__ : List[str] , A__ : str=None ):
'''simple docstring'''
__lowerCamelCase = [(x[0], x[1], get_model(x[2] )) for x in logs]
__lowerCamelCase = [x for x in logs if x[2] is not None]
__lowerCamelCase = {x[2] for x in logs}
__lowerCamelCase = {}
for test in tests:
__lowerCamelCase = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
__lowerCamelCase = counter.most_common()
__lowerCamelCase = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
__lowerCamelCase = sum(error_counts.values() )
if n_errors > 0:
__lowerCamelCase = {"""count""": n_errors, """errors""": error_counts}
__lowerCamelCase = dict(sorted(r.items() , key=lambda A__ : item[1]["count"] , reverse=A__ ) )
return r
def lowerCamelCase__ ( A__ : str ):
'''simple docstring'''
__lowerCamelCase = """| no. | error | status |"""
__lowerCamelCase = """|-:|:-|:-|"""
__lowerCamelCase = [header, sep]
for error in reduced_by_error:
__lowerCamelCase = reduced_by_error[error]["""count"""]
__lowerCamelCase = f'| {count} | {error[:100]} | |'
lines.append(A__ )
return "\n".join(A__ )
def lowerCamelCase__ ( A__ : Optional[int] ):
'''simple docstring'''
__lowerCamelCase = """| model | no. of errors | major error | count |"""
__lowerCamelCase = """|-:|-:|-:|-:|"""
__lowerCamelCase = [header, sep]
for model in reduced_by_model:
__lowerCamelCase = reduced_by_model[model]["""count"""]
__lowerCamelCase, __lowerCamelCase = list(reduced_by_model[model]["""errors"""].items() )[0]
__lowerCamelCase = f'| {model} | {count} | {error[:60]} | {_count} |'
lines.append(A__ )
return "\n".join(A__ )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
UpperCAmelCase_ = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
UpperCAmelCase_ = get_job_links(args.workflow_run_id, token=args.token)
UpperCAmelCase_ = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
UpperCAmelCase_ = k.find(' / ')
UpperCAmelCase_ = k[index + len(' / ') :]
UpperCAmelCase_ = v
with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
UpperCAmelCase_ = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
UpperCAmelCase_ = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
UpperCAmelCase_ = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
UpperCAmelCase_ = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
UpperCAmelCase_ = reduce_by_error(errors)
UpperCAmelCase_ = reduce_by_model(errors)
UpperCAmelCase_ = make_github_table(reduced_by_error)
UpperCAmelCase_ = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
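    # Sketch of the Counter-based reduction performed above, on illustrative
    # (error_line, error, failed_test) triples:
    sample_errors = [("l1", "AssertionError", "test_a"), ("l2", "AssertionError", "test_b"), ("l3", "OSError", "test_c")]
    print(Counter(e[1] for e in sample_errors).most_common(2))
    # [('AssertionError', 2), ('OSError', 1)]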
| 29
|
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : Optional[int] = IFInpaintingPipeline
UpperCAmelCase__ : int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
UpperCAmelCase__ : Dict = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
UpperCAmelCase__ : List[str] = PipelineTesterMixin.required_optional_params - {'latents'}
def lowerCAmelCase__ ( self: List[str] ):
return self._get_dummy_components()
def lowerCAmelCase__ ( self: int , UpperCamelCase_: Dict , UpperCamelCase_: str=0 ):
if str(UpperCamelCase_ ).startswith("""mps""" ):
__lowerCamelCase = torch.manual_seed(UpperCamelCase_ )
else:
__lowerCamelCase = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
__lowerCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
__lowerCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
__lowerCamelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def lowerCAmelCase__ ( self: Union[str, Any] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def lowerCAmelCase__ ( self: Union[str, Any] ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def lowerCAmelCase__ ( self: Optional[int] ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def lowerCAmelCase__ ( self: Any ):
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def lowerCAmelCase__ ( self: str ):
self._test_save_load_local()
def lowerCAmelCase__ ( self: str ):
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 29
| 1
|
import math
import random
def lowerCamelCase__ ( A__ : float , A__ : bool = False ):
'''simple docstring'''
if deriv:
return value * (1 - value)
return 1 / (1 + math.exp(-value ))
# Initial Value
UpperCAmelCase_ = 0.02
def lowerCamelCase__ ( A__ : int , A__ : int ):
'''simple docstring'''
__lowerCamelCase = float(2 * (random.randint(1 , 100 )) - 1 )
for _ in range(A__ ):
# Forward propagation
__lowerCamelCase = sigmoid_function(INITIAL_VALUE * weight )
# How much did we miss?
__lowerCamelCase = (expected / 100) - layer_a
# Error delta
__lowerCamelCase = layer_1_error * sigmoid_function(A__ , A__ )
# Update weight
weight += INITIAL_VALUE * layer_1_delta
return layer_a * 100
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ = int(input('Expected value: '))
UpperCAmelCase_ = int(input('Number of propagations: '))
print(forward_propagation(expected, number_propagations))
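    # Deobfuscated restatement of the training loop above (single weight,
    # constant input); the output drifts toward `expected` as the number of
    # propagations grows (names here are illustrative):
    def sigmoid_demo(value: float, deriv: bool = False) -> float:
        if deriv:  # `value` is assumed to already be a sigmoid output here
            return value * (1 - value)
        return 1 / (1 + math.exp(-value))

    def forward_propagation_demo(expected_value: int, propagations: int, lr: float = 0.02) -> float:
        weight = float(2 * random.randint(1, 100) - 1)
        layer = sigmoid_demo(lr * weight)
        for _ in range(propagations):
            layer = sigmoid_demo(lr * weight)
            error = (expected_value / 100) - layer
            weight += lr * error * sigmoid_demo(layer, deriv=True)
        return layer * 100

    print(forward_propagation_demo(30, 1_000_000))  # approaches 30.0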
| 29
|
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
UpperCAmelCase_ = logging.get_logger(__name__)
@add_end_docstrings(__lowerCamelCase)
class lowerCamelCase__( __lowerCamelCase):
def __init__( self: str , **UpperCamelCase_: int ):
super().__init__(**UpperCamelCase_ )
if self.framework == "tf":
raise ValueError(F'The {self.__class__} is only available in PyTorch.' )
requires_backends(self , """vision""" )
self.check_model_type(UpperCamelCase_ )
def __call__( self: Union[str, Any] , UpperCamelCase_: Union[str, "Image.Image", List[Dict[str, Any]]] , UpperCamelCase_: Union[str, List[str]] = None , **UpperCamelCase_: List[str] , ):
if "text_queries" in kwargs:
__lowerCamelCase = kwargs.pop("""text_queries""" )
if isinstance(UpperCamelCase_ , (str, Image.Image) ):
__lowerCamelCase = {"""image""": image, """candidate_labels""": candidate_labels}
else:
__lowerCamelCase = image
__lowerCamelCase = super().__call__(UpperCamelCase_ , **UpperCamelCase_ )
return results
def lowerCAmelCase__ ( self: List[str] , **UpperCamelCase_: Dict ):
__lowerCamelCase = {}
if "threshold" in kwargs:
__lowerCamelCase = kwargs["""threshold"""]
if "top_k" in kwargs:
__lowerCamelCase = kwargs["""top_k"""]
return {}, {}, postprocess_params
def lowerCAmelCase__ ( self: Any , UpperCamelCase_: Optional[Any] ):
__lowerCamelCase = load_image(inputs["""image"""] )
__lowerCamelCase = inputs["""candidate_labels"""]
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = candidate_labels.split(""",""" )
__lowerCamelCase = torch.tensor([[image.height, image.width]] , dtype=torch.intaa )
for i, candidate_label in enumerate(UpperCamelCase_ ):
__lowerCamelCase = self.tokenizer(UpperCamelCase_ , return_tensors=self.framework )
__lowerCamelCase = self.image_processor(UpperCamelCase_ , return_tensors=self.framework )
yield {
"is_last": i == len(UpperCamelCase_ ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Tuple ):
__lowerCamelCase = model_inputs.pop("""target_size""" )
__lowerCamelCase = model_inputs.pop("""candidate_label""" )
__lowerCamelCase = model_inputs.pop("""is_last""" )
__lowerCamelCase = self.model(**UpperCamelCase_ )
__lowerCamelCase = {"""target_size""": target_size, """candidate_label""": candidate_label, """is_last""": is_last, **outputs}
return model_outputs
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Any , UpperCamelCase_: Dict=0.1 , UpperCamelCase_: Union[str, Any]=None ):
__lowerCamelCase = []
for model_output in model_outputs:
__lowerCamelCase = model_output["""candidate_label"""]
__lowerCamelCase = BaseModelOutput(UpperCamelCase_ )
__lowerCamelCase = self.image_processor.post_process_object_detection(
outputs=UpperCamelCase_ , threshold=UpperCamelCase_ , target_sizes=model_output["""target_size"""] )[0]
for index in outputs["scores"].nonzero():
__lowerCamelCase = outputs["""scores"""][index].item()
__lowerCamelCase = self._get_bounding_box(outputs["""boxes"""][index][0] )
__lowerCamelCase = {"""score""": score, """label""": label, """box""": box}
results.append(UpperCamelCase_ )
__lowerCamelCase = sorted(UpperCamelCase_ , key=lambda UpperCamelCase_ : x["score"] , reverse=UpperCamelCase_ )
if top_k:
__lowerCamelCase = results[:top_k]
return results
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: "torch.Tensor" ):
if self.framework != "pt":
raise ValueError("""The ZeroShotObjectDetectionPipeline is only available in PyTorch.""" )
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = box.int().tolist()
__lowerCamelCase = {
"""xmin""": xmin,
"""ymin""": ymin,
"""xmax""": xmax,
"""ymax""": ymax,
}
return bbox
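# Hedged usage sketch: this pipeline is reachable through the `pipeline`
# factory below; the checkpoint and image path are illustrative, and fetching
# the model requires network access:
from transformers import pipeline

detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
print(detector("cats.png", candidate_labels=["cat", "remote control"]))
# -> [{'score': ..., 'label': 'cat', 'box': {'xmin': ..., 'ymin': ..., 'xmax': ..., 'ymax': ...}}, ...]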
| 29
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase_ = {
'configuration_deberta': ['DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DebertaConfig', 'DebertaOnnxConfig'],
'tokenization_deberta': ['DebertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ['DebertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
'DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'DebertaForMaskedLM',
'DebertaForQuestionAnswering',
'DebertaForSequenceClassification',
'DebertaForTokenClassification',
'DebertaModel',
'DebertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
'TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDebertaForMaskedLM',
'TFDebertaForQuestionAnswering',
'TFDebertaForSequenceClassification',
'TFDebertaForTokenClassification',
'TFDebertaModel',
'TFDebertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 29
|
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase_ = get_tests_dir('fixtures/test_sentencepiece.model')
UpperCAmelCase_ = {'target_lang': 'fi', 'source_lang': 'en'}
UpperCAmelCase_ = '>>zh<<'
UpperCAmelCase_ = 'Helsinki-NLP/'
if is_torch_available():
UpperCAmelCase_ = 'pt'
elif is_tf_available():
UpperCAmelCase_ = 'tf'
else:
UpperCAmelCase_ = 'jax'
@require_sentencepiece
class lowerCamelCase__( __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : Union[str, Any] = MarianTokenizer
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : int = True
def lowerCAmelCase__ ( self: Union[str, Any] ):
super().setUp()
__lowerCamelCase = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
__lowerCamelCase = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
__lowerCamelCase = Path(self.tmpdirname )
save_json(UpperCamelCase_ , save_dir / VOCAB_FILES_NAMES["""vocab"""] )
save_json(UpperCamelCase_ , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(UpperCamelCase_ , save_dir / VOCAB_FILES_NAMES["""source_spm"""] )
copyfile(UpperCamelCase_ , save_dir / VOCAB_FILES_NAMES["""target_spm"""] )
__lowerCamelCase = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase__ ( self: Optional[Any] , **UpperCamelCase_: Any ):
return MarianTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: Optional[int] ):
return (
"This is a test",
"This is a test",
)
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = """</s>"""
__lowerCamelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase_ ) , UpperCamelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase_ ) , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """</s>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """<pad>""" )
self.assertEqual(len(UpperCamelCase_ ) , 9 )
def lowerCAmelCase__ ( self: Tuple ):
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = MarianTokenizer.from_pretrained(F'{ORG_NAME}opus-mt-en-de' )
__lowerCamelCase = en_de_tokenizer(["""I am a small frog"""] , return_tensors=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = [38, 1_21, 14, 6_97, 3_88_48, 0]
self.assertListEqual(UpperCamelCase_ , batch.input_ids[0] )
__lowerCamelCase = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(UpperCamelCase_ )
__lowerCamelCase = [x.name for x in Path(UpperCamelCase_ ).glob("""*""" )]
self.assertIn("""source.spm""" , UpperCamelCase_ )
MarianTokenizer.from_pretrained(UpperCamelCase_ )
    def test_outputs_not_longer_than_maxlen( self: Any ):
        tok = self.get_tokenizer()
        batch = tok(
            ["""I am a small frog""" * 10_00, """I am a small frog"""] , padding=True , truncation=True , return_tensors=FRAMEWORK )
        self.assertIsInstance(batch , BatchEncoding )
        self.assertEqual(batch.input_ids.shape , (2, 5_12) )
    def test_outputs_can_be_shorter( self: List[Any] ):
        tok = self.get_tokenizer()
        batch_smaller = tok(["""I am a tiny frog""", """I am a small frog"""] , padding=True , return_tensors=FRAMEWORK )
        self.assertIsInstance(batch_smaller , BatchEncoding )
        self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
@slow
def lowerCAmelCase__ ( self: Optional[int] ):
# fmt: off
__lowerCamelCase = {"""input_ids""": [[4_34_95, 4_62, 20, 4_21_64, 13_69, 52, 4_64, 1_32, 17_03, 4_92, 13, 74_91, 3_89_99, 6, 8, 4_64, 1_32, 17_03, 4_92, 13, 46_69, 3_78_67, 13, 75_25, 27, 15_93, 9_88, 13, 3_39_72, 70_29, 6, 20, 82_51, 3_83, 2, 2_70, 58_66, 37_88, 2, 23_53, 82_51, 1_23_38, 2, 1_39_58, 3_87, 2, 36_29, 69_53, 1_88, 29_00, 2, 1_39_58, 80_11, 1_15_01, 23, 84_60, 40_73, 3_40_09, 20, 4_35, 1_14_39, 27, 8, 84_60, 40_73, 60_04, 20, 99_88, 3_75, 27, 33, 2_66, 19_45, 10_76, 13_50, 3_78_67, 32_88, 5, 5_77, 10_76, 43_74, 8, 50_82, 5, 2_64_53, 2_57, 5_56, 4_03, 2, 2_42, 1_32, 3_83, 3_16, 4_92, 8, 1_07_67, 6, 3_16, 3_04, 42_39, 3, 0], [1_48, 1_57_22, 19, 18_39, 12, 13_50, 13, 2_23_27, 50_82, 54_18, 4_75_67, 3_59_38, 59, 3_18, 1_95_52, 1_08, 21_83, 54, 1_49_76, 48_35, 32, 5_47, 11_14, 8, 3_15, 24_17, 5, 92, 1_90_88, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00], [36, 63_95, 1_25_70, 3_91_47, 1_15_97, 6, 2_66, 4, 4_54_05, 72_96, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase_ , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , )
    def test_tokenizer_integration_separate_vocabs( self: int ):
        tokenizer = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" )
        source_text = """Tämä on testi"""
        target_text = """This is a test"""
        expected_src_ids = [76, 7, 20_47, 2]
        expected_target_ids = [69, 12, 11, 9_40, 2]
        src_ids = tokenizer(source_text ).input_ids
        self.assertListEqual(src_ids , expected_src_ids )
        target_ids = tokenizer(text_target=target_text ).input_ids
        self.assertListEqual(target_ids , expected_target_ids )
        decoded = tokenizer.decode(target_ids , skip_special_tokens=True )
        self.assertEqual(decoded , target_text )
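# A minimal usage sketch of the tokenizer API exercised by the tests above.
# It downloads a real Helsinki-NLP checkpoint, so network access is assumed;
# guarded behind __main__ so importing this module stays side-effect free.
if __name__ == "__main__":
    demo_tok = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
    demo_batch = demo_tok(["I am a small frog"], return_tensors="pt")
    print(demo_batch.input_ids)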
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'asapp/sew-tiny-100k': 'https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json',
# See all SEW models at https://huggingface.co/models?filter=sew
}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : str = 'sew'
def __init__( self: Optional[Any] , UpperCamelCase_: Union[str, Any]=32 , UpperCamelCase_: str=7_68 , UpperCamelCase_: Tuple=12 , UpperCamelCase_: Any=12 , UpperCamelCase_: Union[str, Any]=30_72 , UpperCamelCase_: Optional[int]=2 , UpperCamelCase_: str="gelu" , UpperCamelCase_: List[str]=0.1 , UpperCamelCase_: List[str]=0.1 , UpperCamelCase_: Optional[Any]=0.1 , UpperCamelCase_: Tuple=0.0 , UpperCamelCase_: Any=0.1 , UpperCamelCase_: Union[str, Any]=0.1 , UpperCamelCase_: List[Any]=0.02 , UpperCamelCase_: Optional[int]=1E-5 , UpperCamelCase_: str="group" , UpperCamelCase_: Optional[Any]="gelu" , UpperCamelCase_: Dict=(64, 1_28, 1_28, 1_28, 1_28, 2_56, 2_56, 2_56, 2_56, 5_12, 5_12, 5_12, 5_12) , UpperCamelCase_: Tuple=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , UpperCamelCase_: Optional[Any]=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , UpperCamelCase_: Tuple=False , UpperCamelCase_: str=1_28 , UpperCamelCase_: Any=16 , UpperCamelCase_: Optional[int]=True , UpperCamelCase_: Union[str, Any]=0.05 , UpperCamelCase_: Tuple=10 , UpperCamelCase_: str=2 , UpperCamelCase_: Any=0.0 , UpperCamelCase_: Optional[int]=10 , UpperCamelCase_: str=0 , UpperCamelCase_: Union[str, Any]="mean" , UpperCamelCase_: str=False , UpperCamelCase_: Optional[int]=False , UpperCamelCase_: Any=2_56 , UpperCamelCase_: Tuple=0 , UpperCamelCase_: Any=1 , UpperCamelCase_: Tuple=2 , **UpperCamelCase_: Tuple , ):
super().__init__(**UpperCamelCase_ , pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ )
__lowerCamelCase = hidden_size
__lowerCamelCase = feat_extract_norm
__lowerCamelCase = feat_extract_activation
__lowerCamelCase = list(UpperCamelCase_ )
__lowerCamelCase = list(UpperCamelCase_ )
__lowerCamelCase = list(UpperCamelCase_ )
__lowerCamelCase = conv_bias
__lowerCamelCase = num_conv_pos_embeddings
__lowerCamelCase = num_conv_pos_embedding_groups
__lowerCamelCase = len(self.conv_dim )
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = intermediate_size
__lowerCamelCase = squeeze_factor
__lowerCamelCase = hidden_act
__lowerCamelCase = num_attention_heads
__lowerCamelCase = hidden_dropout
__lowerCamelCase = attention_dropout
__lowerCamelCase = activation_dropout
__lowerCamelCase = feat_proj_dropout
__lowerCamelCase = final_dropout
__lowerCamelCase = layerdrop
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = initializer_range
__lowerCamelCase = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
            raise ValueError(
                """Configuration for convolutional layers is incorrect. """
                """It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, """
F'but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'
F'= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__lowerCamelCase = apply_spec_augment
__lowerCamelCase = mask_time_prob
__lowerCamelCase = mask_time_length
__lowerCamelCase = mask_time_min_masks
__lowerCamelCase = mask_feature_prob
__lowerCamelCase = mask_feature_length
__lowerCamelCase = mask_feature_min_masks
# ctc loss
__lowerCamelCase = ctc_loss_reduction
__lowerCamelCase = ctc_zero_infinity
# sequence classification
__lowerCamelCase = use_weighted_layer_sum
__lowerCamelCase = classifier_proj_size
@property
def lowerCAmelCase__ ( self: str ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
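# A minimal usage sketch, assuming the class above mirrors transformers'
# SEWConfig: the product of the conv strides gives the ratio between raw
# audio samples and output frames (5 * 2**6 = 320 with the defaults).
if __name__ == "__main__":
    from transformers import SEWConfig

    demo_config = SEWConfig()
    print(demo_config.inputs_to_logits_ratio)  # 320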
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class lowerCamelCase__( unittest.TestCase):
@parameterized.expand([(None,), ("""foo.json""",)] )
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: List[str] ):
__lowerCamelCase = GenerationConfig(
do_sample=UpperCamelCase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCamelCase_ , config_name=UpperCamelCase_ )
__lowerCamelCase = GenerationConfig.from_pretrained(UpperCamelCase_ , config_name=UpperCamelCase_ )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , UpperCamelCase_ )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , UpperCamelCase_ )
    def lowerCAmelCase__ ( self: Optional[int] ):
        model_config = AutoConfig.from_pretrained("""gpt2""" )
        generation_config_from_model = GenerationConfig.from_model_config(model_config )
        default_generation_config = GenerationConfig()
        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model , default_generation_config )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = GenerationConfig()
__lowerCamelCase = {
"""max_new_tokens""": 10_24,
"""foo""": """bar""",
}
__lowerCamelCase = copy.deepcopy(UpperCamelCase_ )
__lowerCamelCase = generation_config.update(**UpperCamelCase_ )
# update_kwargs was not modified (no side effects)
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 10_24 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(UpperCamelCase_ , {"""foo""": """bar"""} )
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = GenerationConfig()
__lowerCamelCase = """bar"""
with tempfile.TemporaryDirectory("""test-generation-config""" ) as tmp_dir:
generation_config.save_pretrained(UpperCamelCase_ )
__lowerCamelCase = GenerationConfig.from_pretrained(UpperCamelCase_ )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , """bar""" )
__lowerCamelCase = GenerationConfig.from_model_config(UpperCamelCase_ )
assert not hasattr(UpperCamelCase_ , """foo""" ) # no new kwargs should be initialized if from config
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , UpperCamelCase_ )
self.assertEqual(default_config.num_beams , 1 )
__lowerCamelCase = GenerationConfig(
do_sample=UpperCamelCase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , UpperCamelCase_ )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCamelCase_ )
__lowerCamelCase = GenerationConfig.from_pretrained(UpperCamelCase_ , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , UpperCamelCase_ )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class lowerCamelCase__( unittest.TestCase):
@classmethod
def lowerCAmelCase__ ( cls: Optional[Any] ):
__lowerCamelCase = TOKEN
HfFolder.save_token(UpperCamelCase_ )
@classmethod
def lowerCAmelCase__ ( cls: str ):
try:
delete_repo(token=cls._token , repo_id="""test-generation-config""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-generation-config-org""" )
except HTTPError:
pass
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = GenerationConfig(
do_sample=UpperCamelCase_ , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""test-generation-config""" , use_auth_token=self._token )
__lowerCamelCase = GenerationConfig.from_pretrained(F'{USER}/test-generation-config' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-generation-config""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
UpperCamelCase_ , repo_id="""test-generation-config""" , push_to_hub=UpperCamelCase_ , use_auth_token=self._token )
__lowerCamelCase = GenerationConfig.from_pretrained(F'{USER}/test-generation-config' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_ ) )
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = GenerationConfig(
do_sample=UpperCamelCase_ , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""valid_org/test-generation-config-org""" , use_auth_token=self._token )
__lowerCamelCase = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-generation-config-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
UpperCamelCase_ , repo_id="""valid_org/test-generation-config-org""" , push_to_hub=UpperCamelCase_ , use_auth_token=self._token )
__lowerCamelCase = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_ ) )
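# A minimal local round-trip sketch of the GenerationConfig API covered above
# (no Hub access needed, unlike the staging tests):
if __name__ == "__main__":
    demo_config = GenerationConfig(do_sample=True, temperature=0.7)
    with tempfile.TemporaryDirectory() as demo_dir:
        demo_config.save_pretrained(demo_dir)
        reloaded = GenerationConfig.from_pretrained(demo_dir)
    assert reloaded.temperature == 0.7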
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Tuple = 'speech_to_text_2'
UpperCAmelCase__ : Any = ['past_key_values']
UpperCAmelCase__ : Dict = {'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model'}
    def __init__( self: int , vocab_size: int=1_00_00 , decoder_layers: Optional[int]=6 , decoder_ffn_dim: Union[str, Any]=20_48 , decoder_attention_heads: Any=4 , decoder_layerdrop: str=0.0 , use_cache: List[str]=True , activation_function: Union[str, Any]="relu" , d_model: Union[str, Any]=2_56 , dropout: List[Any]=0.1 , attention_dropout: Dict=0.0 , activation_dropout: Any=0.0 , init_std: List[str]=0.02 , decoder_start_token_id: Dict=2 , scale_embedding: Union[str, Any]=True , pad_token_id: List[str]=1 , bos_token_id: Optional[Any]=0 , eos_token_id: Tuple=2 , max_target_positions: Tuple=10_24 , **kwargs: Union[str, Any] , ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
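# A minimal usage sketch, assuming the class above mirrors transformers'
# Speech2Text2Config: build a config and round-trip it through a plain dict.
if __name__ == "__main__":
    from transformers import Speech2Text2Config

    demo_config = Speech2Text2Config(vocab_size=10_000, d_model=256)
    restored = Speech2Text2Config.from_dict(demo_config.to_dict())
    assert restored.d_model == 256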
def lowerCamelCase__ ( A__ : list ):
    '''
    Cocktail shaker sort: a bidirectional bubble sort. Sorts the list in
    place and returns it.

    >>> lowerCamelCase__([4, 1, 3, 2])
    [1, 2, 3, 4]
    >>> lowerCamelCase__([])
    []
    '''
    for i in range(len(A__ ) - 1 , 0 , -1 ):
        swapped = False
        # backward pass: drag smaller elements toward the front
        for j in range(i , 0 , -1 ):
            if A__[j] < A__[j - 1]:
                A__[j - 1], A__[j] = A__[j], A__[j - 1]
                swapped = True
        # forward pass: push larger elements toward the back
        for j in range(i ):
            if A__[j] > A__[j + 1]:
                A__[j], A__[j + 1] = A__[j + 1], A__[j]
                swapped = True
        if not swapped:
            break
    return A__
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    UpperCAmelCase_ = input('Enter numbers separated by a comma:\n').strip()
    UpperCAmelCase_ = [int(item) for item in UpperCAmelCase_.split(',')]
    print(f"""{lowerCamelCase__(UpperCAmelCase_) = }""")
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = 0
@slow
def lowerCAmelCase__ ( self: Dict ):
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
__lowerCamelCase = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(UpperCamelCase_ ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
__lowerCamelCase = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(UpperCamelCase_ ) , 0 )
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 20 )
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = AutoConfig.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
# Check that tokenizer_type ≠ model_type
__lowerCamelCase = AutoTokenizer.from_pretrained(UpperCamelCase_ , config=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def lowerCAmelCase__ ( self: List[str] ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(UpperCamelCase_ , """vocab.txt""" ) )
__lowerCamelCase = AutoTokenizer.from_pretrained(UpperCamelCase_ , tokenizer_type="""bert""" , use_fast=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(UpperCamelCase_ , """vocab.json""" ) )
shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(UpperCamelCase_ , """merges.txt""" ) )
__lowerCamelCase = AutoTokenizer.from_pretrained(UpperCamelCase_ , tokenizer_type="""gpt2""" , use_fast=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
@require_tokenizers
def lowerCAmelCase__ ( self: str ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(UpperCamelCase_ , """vocab.txt""" ) )
__lowerCamelCase = AutoTokenizer.from_pretrained(UpperCamelCase_ , tokenizer_type="""bert""" )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(UpperCamelCase_ , """vocab.json""" ) )
shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(UpperCamelCase_ , """merges.txt""" ) )
__lowerCamelCase = AutoTokenizer.from_pretrained(UpperCamelCase_ , tokenizer_type="""gpt2""" )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: str ):
with pytest.raises(UpperCamelCase_ ):
AutoTokenizer.from_pretrained("""./""" , tokenizer_type="""xxx""" )
@require_tokenizers
def lowerCAmelCase__ ( self: Optional[Any] ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
__lowerCamelCase = tokenizer_class.from_pretrained("""wietsedv/bert-base-dutch-cased""" )
self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , UpperCamelCase_ )
else:
self.assertEqual(tokenizer.do_lower_case , UpperCamelCase_ )
self.assertEqual(tokenizer.model_max_length , 5_12 )
@require_tokenizers
def lowerCAmelCase__ ( self: Union[str, Any] ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
UpperCamelCase_ , """julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier""" , ):
__lowerCamelCase = tokenizer_class.from_pretrained("""julien-c/herlolip-not-exists""" )
def lowerCAmelCase__ ( self: Any ):
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
__lowerCamelCase = TOKENIZER_MAPPING.values()
__lowerCamelCase = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(UpperCamelCase_ )
@require_tokenizers
def lowerCAmelCase__ ( self: Union[str, Any] ):
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=UpperCamelCase_ ) , UpperCamelCase_ )
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" ) , UpperCamelCase_ )
@require_tokenizers
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = AutoTokenizer.from_pretrained("""distilbert-base-uncased""" , do_lower_case=UpperCamelCase_ )
__lowerCamelCase = """Hello, world. How are you?"""
__lowerCamelCase = tokenizer.tokenize(UpperCamelCase_ )
self.assertEqual("""[UNK]""" , tokens[0] )
__lowerCamelCase = AutoTokenizer.from_pretrained("""microsoft/mpnet-base""" , do_lower_case=UpperCamelCase_ )
__lowerCamelCase = tokenizer.tokenize(UpperCamelCase_ )
self.assertEqual("""[UNK]""" , tokens[0] )
@require_tokenizers
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = AutoTokenizer.from_pretrained("""robot-test/dummy-tokenizer-fast-with-model-config""" )
self.assertEqual(type(UpperCamelCase_ ) , UpperCamelCase_ )
self.assertEqual(tokenizer.model_max_length , 5_12 )
self.assertEqual(tokenizer.vocab_size , 3_00_00 )
self.assertEqual(tokenizer.unk_token , """[UNK]""" )
self.assertEqual(tokenizer.padding_side , """right""" )
self.assertEqual(tokenizer.truncation_side , """right""" )
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase_ )
__lowerCamelCase = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 12 )
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = AutoTokenizer.from_pretrained("""ctrl""" )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: str ):
# Check we can load the tokenizer config of an online model.
__lowerCamelCase = get_tokenizer_config("""bert-base-cased""" )
__lowerCamelCase = config.pop("""_commit_hash""" , UpperCamelCase_ )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(UpperCamelCase_ , {"""do_lower_case""": False} )
# This model does not have a tokenizer_config so we get back an empty dict.
__lowerCamelCase = get_tokenizer_config(UpperCamelCase_ )
self.assertDictEqual(UpperCamelCase_ , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
__lowerCamelCase = AutoTokenizer.from_pretrained(UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase_ )
__lowerCamelCase = get_tokenizer_config(UpperCamelCase_ )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config["""tokenizer_class"""] , """BertTokenizer""" )
def lowerCAmelCase__ ( self: str ):
try:
AutoConfig.register("""custom""" , UpperCamelCase_ )
AutoTokenizer.register(UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCamelCase_ ):
AutoTokenizer.register(UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ )
__lowerCamelCase = CustomTokenizer.from_pretrained(UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase_ )
__lowerCamelCase = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def lowerCAmelCase__ ( self: Tuple ):
try:
AutoConfig.register("""custom""" , UpperCamelCase_ )
# Can register in two steps
AutoTokenizer.register(UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(UpperCamelCase_ , fast_tokenizer_class=UpperCamelCase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ , fast_tokenizer_class=UpperCamelCase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCamelCase_ ):
AutoTokenizer.register(UpperCamelCase_ , fast_tokenizer_class=UpperCamelCase_ )
            # We pass through a BERT fast tokenizer because there is no slow-to-fast converter for our new tokenizer
            # and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
__lowerCamelCase = BertTokenizerFast.from_pretrained(UpperCamelCase_ )
bert_tokenizer.save_pretrained(UpperCamelCase_ )
__lowerCamelCase = CustomTokenizerFast.from_pretrained(UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase_ )
__lowerCamelCase = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = AutoTokenizer.from_pretrained(UpperCamelCase_ , use_fast=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowerCAmelCase__ ( self: str ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(UpperCamelCase_ ):
__lowerCamelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCamelCase_ ):
__lowerCamelCase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=UpperCamelCase_ )
__lowerCamelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=UpperCamelCase_ )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase_ )
__lowerCamelCase = AutoTokenizer.from_pretrained(UpperCamelCase_ , trust_remote_code=UpperCamelCase_ )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
__lowerCamelCase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase_ )
__lowerCamelCase = AutoTokenizer.from_pretrained(UpperCamelCase_ , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
@require_tokenizers
def lowerCAmelCase__ ( self: Dict ):
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Dict = False
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Union[str, Any] = NewTokenizer
UpperCAmelCase__ : Any = False
try:
AutoConfig.register("""custom""" , UpperCamelCase_ )
AutoTokenizer.register(UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ )
AutoTokenizer.register(UpperCamelCase_ , fast_tokenizer_class=UpperCamelCase_ )
# If remote code is not set, the default is to use local
__lowerCamelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
__lowerCamelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , use_fast=UpperCamelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
__lowerCamelCase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=UpperCamelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
__lowerCamelCase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
__lowerCamelCase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=UpperCamelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertTrue(tokenizer.special_attribute_present )
__lowerCamelCase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=UpperCamelCase_ )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
__lowerCamelCase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
def lowerCAmelCase__ ( self: List[str] ):
with self.assertRaisesRegex(
UpperCamelCase_ , """bert-base is not a local folder and is not a valid model identifier""" ):
__lowerCamelCase = AutoTokenizer.from_pretrained("""bert-base""" )
def lowerCAmelCase__ ( self: Any ):
with self.assertRaisesRegex(
UpperCamelCase_ , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
__lowerCamelCase = AutoTokenizer.from_pretrained(UpperCamelCase_ , revision="""aaaaaa""" )
def lowerCAmelCase__ ( self: int ):
# Make sure we have cached the tokenizer.
__lowerCamelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
with RequestCounter() as counter:
__lowerCamelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
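# A minimal usage sketch of the AutoTokenizer dispatch tested above
# (downloads a checkpoint, so network access is assumed):
if __name__ == "__main__":
    demo_tok = AutoTokenizer.from_pretrained("bert-base-cased")
    print(demo_tok("Hello world").input_ids)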
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def lowerCamelCase__ ( A__ : Dict , A__ : Optional[int]=False ):
'''simple docstring'''
try:
__lowerCamelCase = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
__lowerCamelCase = default
else:
# KEY is set, convert it to True or False.
try:
__lowerCamelCase = strtobool(A__ )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f'If set, {key} must be yes or no.' )
return _value
_run_slow_tests = lowerCamelCase__('RUN_SLOW', default=False)
def lowerCamelCase__ ( A__ : Any ):
'''simple docstring'''
return unittest.skip("""Test was skipped""" )(A__ )
def lowerCamelCase__ ( A__ : List[Any] ):
'''simple docstring'''
return unittest.skipUnless(_run_slow_tests , """test is slow""" )(A__ )
def lowerCamelCase__ ( A__ : Union[str, Any] ):
'''simple docstring'''
return unittest.skipUnless(not torch.cuda.is_available() , """test requires only a CPU""" )(A__ )
def lowerCamelCase__ ( A__ : List[str] ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.is_available() , """test requires a GPU""" )(A__ )
def lowerCamelCase__ ( A__ : Union[str, Any] ):
'''simple docstring'''
return unittest.skipUnless(is_xpu_available() , """test requires a XPU""" )(A__ )
def lowerCamelCase__ ( A__ : Optional[int] ):
'''simple docstring'''
return unittest.skipUnless(is_mps_available() , """test requires a `mps` backend support in `torch`""" )(A__ )
def lowerCamelCase__ ( A__ : List[Any] ):
'''simple docstring'''
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , """test requires the Hugging Face suite""" )(A__ )
def lowerCamelCase__ ( A__ : Any ):
'''simple docstring'''
return unittest.skipUnless(is_bnb_available() , """test requires the bitsandbytes library""" )(A__ )
def lowerCamelCase__ ( A__ : Optional[int] ):
'''simple docstring'''
return unittest.skipUnless(is_tpu_available() , """test requires TPU""" )(A__ )
def lowerCamelCase__ ( A__ : List[Any] ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() == 1 , """test requires a GPU""" )(A__ )
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() == 1 , """test requires a XPU""" )(A__ )
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() > 1 , """test requires multiple GPUs""" )(A__ )
def lowerCamelCase__ ( A__ : Tuple ):
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() > 1 , """test requires multiple XPUs""" )(A__ )
def lowerCamelCase__ ( A__ : Optional[int] ):
'''simple docstring'''
return unittest.skipUnless(is_safetensors_available() , """test requires safetensors""" )(A__ )
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
return unittest.skipUnless(is_deepspeed_available() , """test requires DeepSpeed""" )(A__ )
def lowerCamelCase__ ( A__ : List[str] ):
'''simple docstring'''
return unittest.skipUnless(is_torch_version(""">=""" , """1.12.0""" ) , """test requires torch version >= 1.12.0""" )(A__ )
def lowerCamelCase__ ( A__ : Tuple=None , A__ : Optional[Any]=None ):
'''simple docstring'''
if test_case is None:
return partial(A__ , version=A__ )
return unittest.skipUnless(is_torch_version(""">=""" , A__ ) , f'test requires torch version >= {version}' )(A__ )
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
return unittest.skipUnless(is_tensorboard_available() , """test requires Tensorboard""" )(A__ )
def lowerCamelCase__ ( A__ : Optional[Any] ):
'''simple docstring'''
return unittest.skipUnless(is_wandb_available() , """test requires wandb""" )(A__ )
def lowerCamelCase__ ( A__ : str ):
'''simple docstring'''
return unittest.skipUnless(is_comet_ml_available() , """test requires comet_ml""" )(A__ )
_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def lowerCamelCase__ ( A__ : Any ):
'''simple docstring'''
return unittest.skipUnless(
_atleast_one_tracker_available , """test requires at least one tracker to be available and for `comet_ml` to not be installed""" , )(A__ )
class lowerCamelCase__( unittest.TestCase):
UpperCAmelCase__ : List[Any] = True
@classmethod
def lowerCAmelCase__ ( cls: int ):
__lowerCamelCase = tempfile.mkdtemp()
@classmethod
def lowerCAmelCase__ ( cls: Any ):
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def lowerCAmelCase__ ( self: Any ):
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob("""**/*""" ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(UpperCamelCase_ )
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: int ):
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: Union[mock.Mock, List[mock.Mock]] ):
__lowerCamelCase = mocks if isinstance(UpperCamelCase_ , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def lowerCamelCase__ ( A__ : Optional[Any] ):
'''simple docstring'''
__lowerCamelCase = AcceleratorState()
__lowerCamelCase = tensor[None].clone().to(state.device )
__lowerCamelCase = gather(A__ ).cpu()
__lowerCamelCase = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , A__ ):
return False
return True
class lowerCamelCase__:
def __init__( self: Union[str, Any] , UpperCamelCase_: Dict , UpperCamelCase_: Any , UpperCamelCase_: Any ):
__lowerCamelCase = returncode
__lowerCamelCase = stdout
__lowerCamelCase = stderr
async def lowerCamelCase__ ( A__ : int , A__ : Any ):
'''simple docstring'''
while True:
__lowerCamelCase = await stream.readline()
if line:
callback(A__ )
else:
break
async def lowerCamelCase__ ( A__ : Dict , A__ : List[str]=None , A__ : Any=None , A__ : Optional[Any]=None , A__ : Tuple=False , A__ : List[Any]=False ):
'''simple docstring'''
if echo:
print("""\nRunning: """ , """ """.join(A__ ) )
__lowerCamelCase = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=A__ , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=A__ , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
__lowerCamelCase = []
__lowerCamelCase = []
def tee(A__ : int , A__ : Any , A__ : Optional[Any] , A__ : int="" ):
__lowerCamelCase = line.decode("""utf-8""" ).rstrip()
sink.append(A__ )
if not quiet:
print(A__ , A__ , file=A__ )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda A__ : tee(A__ , A__ , sys.stdout , label="""stdout:""" ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda A__ : tee(A__ , A__ , sys.stderr , label="""stderr:""" ) ) ),
] , timeout=A__ , )
return _RunOutput(await p.wait() , A__ , A__ )
def lowerCamelCase__ ( A__ : Optional[Any] , A__ : Any=None , A__ : Union[str, Any]=None , A__ : Dict=180 , A__ : str=False , A__ : List[Any]=True ):
'''simple docstring'''
__lowerCamelCase = asyncio.get_event_loop()
__lowerCamelCase = loop.run_until_complete(
_stream_subprocess(A__ , env=A__ , stdin=A__ , timeout=A__ , quiet=A__ , echo=A__ ) )
__lowerCamelCase = """ """.join(A__ )
if result.returncode > 0:
__lowerCamelCase = """\n""".join(result.stderr )
raise RuntimeError(
f'\'{cmd_str}\' failed with returncode {result.returncode}\n\n'
f'The combined stderr from workers follows:\n{stderr}' )
return result
class SubprocessCallException(Exception):
    pass
def lowerCamelCase__ ( A__ : List[str] , return_stdout: bool=False ):
    '''Run a command with subprocess.check_output, raising SubprocessCallException on failure.'''
    try:
        output = subprocess.check_output(A__ , stderr=subprocess.STDOUT )
        if return_stdout:
            if hasattr(output , """decode""" ):
                output = output.decode("""utf-8""" )
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f'Command `{" ".join(A__ )}` failed with the following error:\n\n{e.output.decode()}' ) from e
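# A minimal usage sketch for the check_output wrapper defined just above
# (the obfuscated name `lowerCamelCase__` resolves to it at module end):
if __name__ == "__main__":
    print(lowerCamelCase__([sys.executable, "-c", "print('ok')"], return_stdout=True))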
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
UpperCAmelCase_ = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class lowerCamelCase__( unittest.TestCase):
def __init__( self: List[Any] , UpperCamelCase_: Dict , UpperCamelCase_: int=7 , UpperCamelCase_: Dict=3 , UpperCamelCase_: int=18 , UpperCamelCase_: Tuple=30 , UpperCamelCase_: Optional[Any]=4_00 , UpperCamelCase_: Tuple=None , UpperCamelCase_: Dict=True , UpperCamelCase_: List[Any]=True , UpperCamelCase_: Dict=None , ):
__lowerCamelCase = size if size is not None else {"""height""": 20, """width""": 20}
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = num_channels
__lowerCamelCase = image_size
__lowerCamelCase = min_resolution
__lowerCamelCase = max_resolution
__lowerCamelCase = size
__lowerCamelCase = do_normalize
__lowerCamelCase = do_convert_rgb
__lowerCamelCase = [5_12, 10_24, 20_48, 40_96]
__lowerCamelCase = patch_size if patch_size is not None else {"""height""": 16, """width""": 16}
def lowerCAmelCase__ ( self: Dict ):
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = """https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"""
__lowerCamelCase = Image.open(requests.get(UpperCamelCase_ , stream=UpperCamelCase_ ).raw ).convert("""RGB""" )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , )
@require_torch
@require_vision
class lowerCamelCase__( __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : List[Any] = PixaStructImageProcessor if is_vision_available() else None
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = PixaStructImageProcessingTester(self )
@property
def lowerCAmelCase__ ( self: Dict ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase_ , """do_normalize""" ) )
self.assertTrue(hasattr(UpperCamelCase_ , """do_convert_rgb""" ) )
def lowerCAmelCase__ ( self: int ):
__lowerCamelCase = self.image_processor_tester.prepare_dummy_image()
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
__lowerCamelCase = 20_48
__lowerCamelCase = image_processor(UpperCamelCase_ , return_tensors="""pt""" , max_patches=UpperCamelCase_ )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1E-3 , rtol=1E-3 ) )
def lowerCAmelCase__ ( self: Any ):
# Initialize image_processor
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , Image.Image )
# Test not batched input
__lowerCamelCase = (
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=UpperCamelCase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__lowerCamelCase = image_processor(
UpperCamelCase_ , return_tensors="""pt""" , max_patches=UpperCamelCase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def lowerCAmelCase__ ( self: List[str] ):
# Initialize image_processor
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , Image.Image )
# Test not batched input
__lowerCamelCase = (
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* self.image_processor_tester.num_channels
) + 2
__lowerCamelCase = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(UpperCamelCase_ ):
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=UpperCamelCase_ ).flattened_patches
__lowerCamelCase = """Hello"""
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=UpperCamelCase_ , header_text=UpperCamelCase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__lowerCamelCase = image_processor(
UpperCamelCase_ , return_tensors="""pt""" , max_patches=UpperCamelCase_ , header_text=UpperCamelCase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def lowerCAmelCase__ ( self: str ):
# Initialize image_processor
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , numpify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , np.ndarray )
__lowerCamelCase = (
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=UpperCamelCase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__lowerCamelCase = image_processor(
UpperCamelCase_ , return_tensors="""pt""" , max_patches=UpperCamelCase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def lowerCAmelCase__ ( self: List[str] ):
# Initialize image_processor
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , torch.Tensor )
# Test not batched input
__lowerCamelCase = (
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=UpperCamelCase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__lowerCamelCase = image_processor(
UpperCamelCase_ , return_tensors="""pt""" , max_patches=UpperCamelCase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , )
@require_torch
@require_vision
class lowerCamelCase__( __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : List[Any] = PixaStructImageProcessor if is_vision_available() else None
def lowerCAmelCase__ ( self: int ):
__lowerCamelCase = PixaStructImageProcessingTester(self , num_channels=4 )
__lowerCamelCase = 3
@property
def lowerCAmelCase__ ( self: Dict ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase_ , """do_normalize""" ) )
self.assertTrue(hasattr(UpperCamelCase_ , """do_convert_rgb""" ) )
def lowerCAmelCase__ ( self: int ):
# Initialize image_processor
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , Image.Image )
# Test not batched input
__lowerCamelCase = (
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=UpperCamelCase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__lowerCamelCase = image_processor(
UpperCamelCase_ , return_tensors="""pt""" , max_patches=UpperCamelCase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
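# A minimal usage sketch, assuming the processor under test mirrors
# transformers' Pix2StructImageProcessor (requires torch and PIL at runtime):
# flatten one RGB image into a fixed-length patch sequence.
if __name__ == "__main__":
    from transformers import Pix2StructImageProcessor

    demo_processor = Pix2StructImageProcessor()
    demo_image = Image.new("RGB", (64, 64))
    demo_inputs = demo_processor(images=demo_image, return_tensors="pt", max_patches=512)
    print(demo_inputs.flattened_patches.shape)  # (1, 512, 16 * 16 * 3 + 2) with default 16x16 patches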
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
UpperCAmelCase_ = datasets.utils.logging.get_logger(__name__)
class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
UpperCAmelCase__ : bool = None
UpperCAmelCase__ : bool = None
class lowerCamelCase__( folder_based_builder.FolderBasedBuilder):
UpperCAmelCase__ : List[Any] = datasets.Audio()
UpperCAmelCase__ : str = 'audio'
UpperCAmelCase__ : Union[str, Any] = AudioFolderConfig
UpperCAmelCase__ : List[str] # definition at the bottom of the script
UpperCAmelCase__ : Optional[int] = AudioClassification(audio_column='audio' , label_column='label')
AUDIO_EXTENSIONS = [
'.aiff',
'.au',
'.avr',
'.caf',
'.flac',
'.htk',
'.svx',
'.mat4',
'.mat5',
'.mpc2k',
'.ogg',
'.paf',
'.pvf',
'.raw',
'.rf64',
'.sd2',
'.sds',
'.ircam',
'.voc',
'.w64',
'.wav',
'.nist',
'.wavex',
'.wve',
'.xi',
'.mp3',
'.opus',
]
UpperCAmelCase_ = AUDIO_EXTENSIONS
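# A minimal usage sketch, assuming this builder backs the "audiofolder"
# loader in datasets (directory layout: data_dir/<label>/<clip>.wav; the
# path below is hypothetical):
if __name__ == "__main__":
    from datasets import load_dataset

    demo_ds = load_dataset("audiofolder", data_dir="path/to/audio_folder")
    print(demo_ds["train"].features)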
def lowerCamelCase__ ( A__ : int , B__ : int ):
    '''
    Bitwise AND of two non-negative integers, returned as a zero-padded
    binary string.

    >>> lowerCamelCase__(25, 32)
    '0b000000'
    >>> lowerCamelCase__(37, 100)
    '0b0100100'
    '''
    if A__ < 0 or B__ < 0:
        raise ValueError("""the value of both inputs must be non-negative""" )
    a_binary = str(bin(A__ ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(B__ ) )[2:]  # remove the leading "0b"
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int(char_a == """1""" and char_b == """1""" ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
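# Sanity check against Python's built-in bitwise AND, using the function
# defined above (its obfuscated name kept as-is):
assert int(lowerCamelCase__(25 , 32 ) , 2 ) == 25 & 32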
if __name__ == "__main__":
import doctest
doctest.testmod()
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'nvidia/segformer-b0-finetuned-ade-512-512': (
'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : str = 'segformer'
def __init__( self: Union[str, Any] , UpperCamelCase_: Optional[int]=3 , UpperCamelCase_: Any=4 , UpperCamelCase_: int=[2, 2, 2, 2] , UpperCamelCase_: Optional[Any]=[8, 4, 2, 1] , UpperCamelCase_: Union[str, Any]=[32, 64, 1_60, 2_56] , UpperCamelCase_: int=[7, 3, 3, 3] , UpperCamelCase_: Dict=[4, 2, 2, 2] , UpperCamelCase_: str=[1, 2, 5, 8] , UpperCamelCase_: List[str]=[4, 4, 4, 4] , UpperCamelCase_: Optional[int]="gelu" , UpperCamelCase_: List[Any]=0.0 , UpperCamelCase_: List[Any]=0.0 , UpperCamelCase_: Tuple=0.1 , UpperCamelCase_: Optional[int]=0.02 , UpperCamelCase_: List[Any]=0.1 , UpperCamelCase_: Optional[int]=1E-6 , UpperCamelCase_: Optional[int]=2_56 , UpperCamelCase_: Optional[Any]=2_55 , **UpperCamelCase_: List[Any] , ):
super().__init__(**UpperCamelCase_ )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
"""Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"""
""" removed, as the behaviour will default to that of reshape_last_stage = True.""" , UpperCamelCase_ , )
__lowerCamelCase = num_channels
__lowerCamelCase = num_encoder_blocks
__lowerCamelCase = depths
__lowerCamelCase = sr_ratios
__lowerCamelCase = hidden_sizes
__lowerCamelCase = patch_sizes
__lowerCamelCase = strides
__lowerCamelCase = mlp_ratios
__lowerCamelCase = num_attention_heads
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = classifier_dropout_prob
__lowerCamelCase = initializer_range
__lowerCamelCase = drop_path_rate
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = decoder_hidden_size
__lowerCamelCase = kwargs.get("""reshape_last_stage""" , UpperCamelCase_ )
__lowerCamelCase = semantic_loss_ignore_index
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Any = version.parse('1.11')
@property
def lowerCAmelCase__ ( self: Any ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCAmelCase__ ( self: Union[str, Any] ):
return 1E-4
@property
def lowerCAmelCase__ ( self: Dict ):
return 12
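# Editor's usage sketch (added; not in the original): the config class above
# mirrors `SegformerConfig` from transformers; typical construction, with
# illustrative values:
#
#     from transformers import SegformerConfig, SegformerModel
#     config = SegformerConfig(num_encoder_blocks=4, hidden_sizes=[32, 64, 160, 256])
#     model = SegformerModel(config)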
| 29
| 1
|
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'huggingface/time-series-transformer-tourism-monthly': (
'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Any = 'time_series_transformer'
UpperCAmelCase__ : Optional[Any] = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
'num_hidden_layers': 'encoder_layers',
}
def __init__( self: List[str] , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: str = "student_t" , UpperCamelCase_: str = "nll" , UpperCamelCase_: int = 1 , UpperCamelCase_: List[int] = [1, 2, 3, 4, 5, 6, 7] , UpperCamelCase_: Optional[Union[str, bool]] = "mean" , UpperCamelCase_: int = 0 , UpperCamelCase_: int = 0 , UpperCamelCase_: int = 0 , UpperCamelCase_: int = 0 , UpperCamelCase_: Optional[List[int]] = None , UpperCamelCase_: Optional[List[int]] = None , UpperCamelCase_: int = 32 , UpperCamelCase_: int = 32 , UpperCamelCase_: int = 2 , UpperCamelCase_: int = 2 , UpperCamelCase_: int = 2 , UpperCamelCase_: int = 2 , UpperCamelCase_: bool = True , UpperCamelCase_: str = "gelu" , UpperCamelCase_: int = 64 , UpperCamelCase_: float = 0.1 , UpperCamelCase_: float = 0.1 , UpperCamelCase_: float = 0.1 , UpperCamelCase_: float = 0.1 , UpperCamelCase_: float = 0.1 , UpperCamelCase_: int = 1_00 , UpperCamelCase_: float = 0.02 , UpperCamelCase_: Union[str, Any]=True , **UpperCamelCase_: Union[str, Any] , ):
# time series specific configuration
__lowerCamelCase = prediction_length
__lowerCamelCase = context_length or prediction_length
__lowerCamelCase = distribution_output
__lowerCamelCase = loss
__lowerCamelCase = input_size
__lowerCamelCase = num_time_features
__lowerCamelCase = lags_sequence
__lowerCamelCase = scaling
__lowerCamelCase = num_dynamic_real_features
__lowerCamelCase = num_static_real_features
__lowerCamelCase = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(UpperCamelCase_ ) != num_static_categorical_features:
raise ValueError(
"""The cardinality should be a list of the same length as `num_static_categorical_features`""" )
__lowerCamelCase = cardinality
else:
__lowerCamelCase = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(UpperCamelCase_ ) != num_static_categorical_features:
raise ValueError(
"""The embedding dimension should be a list of the same length as `num_static_categorical_features`""" )
__lowerCamelCase = embedding_dimension
else:
__lowerCamelCase = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
__lowerCamelCase = num_parallel_samples
# Transformer architecture configuration
__lowerCamelCase = input_size * len(UpperCamelCase_ ) + self._number_of_features
__lowerCamelCase = d_model
__lowerCamelCase = encoder_attention_heads
__lowerCamelCase = decoder_attention_heads
__lowerCamelCase = encoder_ffn_dim
__lowerCamelCase = decoder_ffn_dim
__lowerCamelCase = encoder_layers
__lowerCamelCase = decoder_layers
__lowerCamelCase = dropout
__lowerCamelCase = attention_dropout
__lowerCamelCase = activation_dropout
__lowerCamelCase = encoder_layerdrop
__lowerCamelCase = decoder_layerdrop
__lowerCamelCase = activation_function
__lowerCamelCase = init_std
__lowerCamelCase = use_cache
super().__init__(is_encoder_decoder=UpperCamelCase_ , **UpperCamelCase_ )
@property
def lowerCAmelCase__ ( self: str ):
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
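# Editor's usage sketch (added; not in the original): the class above mirrors
# `TimeSeriesTransformerConfig`; the field values below are illustrative:
#
#     from transformers import TimeSeriesTransformerConfig, TimeSeriesTransformerModel
#     config = TimeSeriesTransformerConfig(prediction_length=24, context_length=48)
#     model = TimeSeriesTransformerModel(config)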
| 29
|
import string
import numpy
def lowerCamelCase__ ( A__ : int , A__ : int ):
'''simple docstring'''
return b if a == 0 else greatest_common_divisor(b % a , A__ )
class lowerCamelCase__:
UpperCAmelCase__ : Optional[int] = string.ascii_uppercase + string.digits
# This cipher takes alphanumerics into account
# i.e. a total of 36 characters
# take x and return x % len(key_string)
    UpperCAmelCase__ : Optional[int] = numpy.vectorize(lambda x: x % 36)
UpperCAmelCase__ : List[Any] = numpy.vectorize(__lowerCamelCase)
def __init__( self: List[Any] , UpperCamelCase_: numpy.ndarray ):
__lowerCamelCase = self.modulus(UpperCamelCase_ ) # mod36 calc's on the encrypt key
self.check_determinant() # validate the determinant of the encryption key
__lowerCamelCase = encrypt_key.shape[0]
def lowerCAmelCase__ ( self: str , UpperCamelCase_: str ):
return self.key_string.index(UpperCamelCase_ )
def lowerCAmelCase__ ( self: str , UpperCamelCase_: int ):
return self.key_string[round(UpperCamelCase_ )]
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
__lowerCamelCase = det % len(self.key_string )
__lowerCamelCase = len(self.key_string )
if greatest_common_divisor(UpperCamelCase_ , len(self.key_string ) ) != 1:
__lowerCamelCase = (
F'determinant modular {req_l} of encryption key({det}) '
            F'is not coprime w.r.t {req_l}.\nTry another key.'
)
raise ValueError(UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: str ):
__lowerCamelCase = [char for char in text.upper() if char in self.key_string]
__lowerCamelCase = chars[-1]
while len(UpperCamelCase_ ) % self.break_key != 0:
chars.append(UpperCamelCase_ )
return "".join(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: str ):
__lowerCamelCase = self.process_text(text.upper() )
__lowerCamelCase = """"""
for i in range(0 , len(UpperCamelCase_ ) - self.break_key + 1 , self.break_key ):
__lowerCamelCase = text[i : i + self.break_key]
__lowerCamelCase = [self.replace_letters(UpperCamelCase_ ) for char in batch]
__lowerCamelCase = numpy.array([vec] ).T
__lowerCamelCase = self.modulus(self.encrypt_key.dot(UpperCamelCase_ ) ).T.tolist()[
0
]
__lowerCamelCase = """""".join(
self.replace_digits(UpperCamelCase_ ) for num in batch_encrypted )
encrypted += encrypted_batch
return encrypted
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
__lowerCamelCase = det % len(self.key_string )
__lowerCamelCase = None
for i in range(len(self.key_string ) ):
if (det * i) % len(self.key_string ) == 1:
__lowerCamelCase = i
break
__lowerCamelCase = (
det_inv
* numpy.linalg.det(self.encrypt_key )
* numpy.linalg.inv(self.encrypt_key )
)
return self.to_int(self.modulus(UpperCamelCase_ ) )
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: str ):
__lowerCamelCase = self.make_decrypt_key()
__lowerCamelCase = self.process_text(text.upper() )
__lowerCamelCase = """"""
for i in range(0 , len(UpperCamelCase_ ) - self.break_key + 1 , self.break_key ):
__lowerCamelCase = text[i : i + self.break_key]
__lowerCamelCase = [self.replace_letters(UpperCamelCase_ ) for char in batch]
__lowerCamelCase = numpy.array([vec] ).T
__lowerCamelCase = self.modulus(decrypt_key.dot(UpperCamelCase_ ) ).T.tolist()[0]
__lowerCamelCase = """""".join(
self.replace_digits(UpperCamelCase_ ) for num in batch_decrypted )
decrypted += decrypted_batch
return decrypted
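# Editor's addition (not in the original): alias the obfuscated class name as it
# is used by the interactive `main` below, plus a self-contained check of one
# encryption step for the key [[2, 5], [1, 6]] over the alphabet A-Z0-9.
HillCipher = lowerCamelCase__
_demo_key = numpy.array([[2, 5], [1, 6]])
_demo_vec = numpy.array([[7], [4]])  # block "HE" with A=0 ... Z=25, 0=26 ... 9=35
assert (_demo_key.dot(_demo_vec) % 36).flatten().tolist() == [34, 31]  # -> "85"
# "HELLO" pads to "HELLOO" and encrypts block-by-block to "85FF00".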
def lowerCamelCase__ ( ):
'''simple docstring'''
__lowerCamelCase = int(input("""Enter the order of the encryption key: """ ) )
__lowerCamelCase = []
print("""Enter each row of the encryption key with space separated integers""" )
for _ in range(A__ ):
__lowerCamelCase = [int(A__ ) for x in input().split()]
hill_matrix.append(A__ )
__lowerCamelCase = HillCipher(numpy.array(A__ ) )
print("""Would you like to encrypt or decrypt some text? (1 or 2)""" )
__lowerCamelCase = input("""\n1. Encrypt\n2. Decrypt\n""" )
if option == "1":
__lowerCamelCase = input("""What text would you like to encrypt?: """ )
print("""Your encrypted text is:""" )
print(hc.encrypt(A__ ) )
elif option == "2":
__lowerCamelCase = input("""What text would you like to decrypt?: """ )
print("""Your decrypted text is:""" )
print(hc.decrypt(A__ ) )
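# Editor's alias (added; not in the original) so the __main__ guard below
# resolves the obfuscated function name.
main = lowerCamelCase__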
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 29
| 1
|
import os
# Precompute the first 100 triangular numbers
UpperCAmelCase_ = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def lowerCamelCase__ ( ):
'''simple docstring'''
    script_dir = os.path.dirname(os.path.realpath(__file__ ) )
    words_file_path = os.path.join(script_dir , """words.txt""" )
    words = """"""
    with open(words_file_path ) as f:
        words = f.readline()
    words = [word.strip("""\"""" ) for word in words.strip("""\r\n""" ).split(""",""" )]
    words = [
        word
        for word in [sum(ord(x ) - 64 for x in word ) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words )
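# Editor's addition (not in the original): alias for the __main__ guard below.
# Example: "SKY" -> 19 + 11 + 25 = 55 = t_10, so it counts as a triangle word;
# the published answer to Project Euler 42 is 162.
solution = lowerCamelCase__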
if __name__ == "__main__":
print(solution())
| 29
|
import qiskit
def lowerCamelCase__ ( bita : int , bitb : int ):
    '''simple docstring'''
    simulator = qiskit.Aer.get_backend("""aer_simulator""" )
    qc_ha = qiskit.QuantumCircuit(4 , 2 )
    # encode inputs in qubits 0 and 1
    if bita == 1:
        qc_ha.x(0 )
    if bitb == 1:
        qc_ha.x(1 )
    qc_ha.barrier()
    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0 , 2 )
    qc_ha.cx(1 , 2 )
    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0 , 1 , 3 )
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2 , 0 )  # extract XOR value
    qc_ha.measure(3 , 1 )  # extract AND value
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha , simulator , shots=1000 )
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha )
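# Editor's addition (not in the original): alias for the __main__ guard below.
# For inputs (1, 1) the sum qubit reads 0 and the carry qubit reads 1, so all
# 1000 shots should land in the classical register string "10".
half_adder = lowerCamelCase__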
if __name__ == "__main__":
UpperCAmelCase_ = half_adder(1, 1)
print(f"""Half Adder Output Qubit Counts: {counts}""")
| 29
| 1
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
UpperCAmelCase_ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
UpperCAmelCase_ = {
'''vocab_file''': {
'''google/electra-small-generator''': (
'''https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'''
),
'''google/electra-base-generator''': '''https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt''',
'''google/electra-large-generator''': (
'''https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'''
),
'''google/electra-small-discriminator''': (
'''https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'''
),
'''google/electra-base-discriminator''': (
'''https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'''
),
'''google/electra-large-discriminator''': (
'''https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''google/electra-small-generator''': (
'''https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'''
),
'''google/electra-base-generator''': (
'''https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'''
),
'''google/electra-large-generator''': (
'''https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'''
),
'''google/electra-small-discriminator''': (
'''https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'''
),
'''google/electra-base-discriminator''': (
'''https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'''
),
'''google/electra-large-discriminator''': (
'''https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'''
),
},
}
UpperCAmelCase_ = {
'''google/electra-small-generator''': 512,
'''google/electra-base-generator''': 512,
'''google/electra-large-generator''': 512,
'''google/electra-small-discriminator''': 512,
'''google/electra-base-discriminator''': 512,
'''google/electra-large-discriminator''': 512,
}
UpperCAmelCase_ = {
'''google/electra-small-generator''': {'''do_lower_case''': True},
'''google/electra-base-generator''': {'''do_lower_case''': True},
'''google/electra-large-generator''': {'''do_lower_case''': True},
'''google/electra-small-discriminator''': {'''do_lower_case''': True},
'''google/electra-base-discriminator''': {'''do_lower_case''': True},
'''google/electra-large-discriminator''': {'''do_lower_case''': True},
}
class lowerCamelCase__( SCREAMING_SNAKE_CASE_):
UpperCAmelCase__ : Optional[Any] = VOCAB_FILES_NAMES
UpperCAmelCase__ : int = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : Union[str, Any] = PRETRAINED_INIT_CONFIGURATION
UpperCAmelCase__ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : Dict = ElectraTokenizer
def __init__( self: int , UpperCamelCase_: List[Any]=None , UpperCamelCase_: int=None , UpperCamelCase_: List[str]=True , UpperCamelCase_: Any="[UNK]" , UpperCamelCase_: Any="[SEP]" , UpperCamelCase_: Union[str, Any]="[PAD]" , UpperCamelCase_: Dict="[CLS]" , UpperCamelCase_: List[Any]="[MASK]" , UpperCamelCase_: str=True , UpperCamelCase_: Optional[int]=None , **UpperCamelCase_: Optional[int] , ):
super().__init__(
__a , tokenizer_file=__a , do_lower_case=__a , unk_token=__a , sep_token=__a , pad_token=__a , cls_token=__a , mask_token=__a , tokenize_chinese_chars=__a , strip_accents=__a , **__a , )
__lowerCamelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , __a ) != do_lower_case
or normalizer_state.get("""strip_accents""" , __a ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , __a ) != tokenize_chinese_chars
):
__lowerCamelCase = getattr(__a , normalizer_state.pop("""type""" ) )
__lowerCamelCase = do_lower_case
__lowerCamelCase = strip_accents
__lowerCamelCase = tokenize_chinese_chars
__lowerCamelCase = normalizer_class(**__a )
__lowerCamelCase = do_lower_case
def lowerCAmelCase__ ( self: Any , UpperCamelCase_: List[str] , UpperCamelCase_: Optional[Any]=None ):
__lowerCamelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None ):
__lowerCamelCase = [self.sep_token_id]
__lowerCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: str , UpperCamelCase_: Optional[str] = None ):
__lowerCamelCase = self._tokenizer.model.save(__a , name=__a )
return tuple(__a )
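# Editor's usage sketch (added; not in the original): the class above mirrors
# `ElectraTokenizerFast`; a typical call:
#
#     from transformers import ElectraTokenizerFast
#     tok = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
#     ids = tok("Hello world")["input_ids"]  # [CLS] ... [SEP] token ids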
| 350
|
def lowerCamelCase__ ( A__ : int ):
'''simple docstring'''
    memo = [[0 for _ in range(A__ )] for _ in range(A__ + 1 )]
    for i in range(A__ + 1 ):
        memo[i][0] = 1
    for n in range(A__ + 1 ):
        for k in range(1 , A__ ):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[A__][A__ - 1]
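# Editor's addition (not in the original): alias for the __main__ guard below,
# with two hand-checked values of the partition function p(m).
partition = lowerCamelCase__
# partition(3) == 3   (3, 2+1, 1+1+1)
# partition(4) == 5   (4, 3+1, 2+2, 2+1+1, 1+1+1+1)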
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
UpperCAmelCase_ = int(input('Enter a number: ').strip())
print(partition(n))
except ValueError:
print('Please enter a number.')
else:
try:
UpperCAmelCase_ = int(sys.argv[1])
print(partition(n))
except ValueError:
print('Please pass a number.')
| 29
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase_ = {
'configuration_maskformer': ['MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MaskFormerConfig'],
'configuration_maskformer_swin': ['MaskFormerSwinConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ['MaskFormerFeatureExtractor']
UpperCAmelCase_ = ['MaskFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
'MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'MaskFormerForInstanceSegmentation',
'MaskFormerModel',
'MaskFormerPreTrainedModel',
]
UpperCAmelCase_ = [
'MaskFormerSwinBackbone',
'MaskFormerSwinModel',
'MaskFormerSwinPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 351
|
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
UpperCAmelCase_ = logging.get_logger(__name__)
@add_end_docstrings(__lowerCamelCase)
class lowerCamelCase__( __lowerCamelCase):
def __init__( self: Tuple , *UpperCamelCase_: Dict , **UpperCamelCase_: Optional[int] ):
super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
requires_backends(self , """decord""" )
self.check_model_type(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: int=None , UpperCamelCase_: List[Any]=None , UpperCamelCase_: Optional[int]=None ):
__lowerCamelCase = {}
if frame_sampling_rate is not None:
__lowerCamelCase = frame_sampling_rate
if num_frames is not None:
__lowerCamelCase = num_frames
__lowerCamelCase = {}
if top_k is not None:
__lowerCamelCase = top_k
return preprocess_params, {}, postprocess_params
def __call__( self: Any , UpperCamelCase_: Union[str, List[str]] , **UpperCamelCase_: str ):
return super().__call__(UpperCamelCase_ , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: List[str]=None , UpperCamelCase_: List[Any]=1 ):
if num_frames is None:
__lowerCamelCase = self.model.config.num_frames
if video.startswith("""http://""" ) or video.startswith("""https://""" ):
__lowerCamelCase = BytesIO(requests.get(UpperCamelCase_ ).content )
__lowerCamelCase = VideoReader(UpperCamelCase_ )
videoreader.seek(0 )
__lowerCamelCase = 0
__lowerCamelCase = num_frames * frame_sampling_rate - 1
__lowerCamelCase = np.linspace(UpperCamelCase_ , UpperCamelCase_ , num=UpperCamelCase_ , dtype=np.intaa )
__lowerCamelCase = videoreader.get_batch(UpperCamelCase_ ).asnumpy()
__lowerCamelCase = list(UpperCamelCase_ )
__lowerCamelCase = self.image_processor(UpperCamelCase_ , return_tensors=self.framework )
return model_inputs
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: Any ):
__lowerCamelCase = self.model(**UpperCamelCase_ )
return model_outputs
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: List[Any] , UpperCamelCase_: Optional[int]=5 ):
if top_k > self.model.config.num_labels:
__lowerCamelCase = self.model.config.num_labels
if self.framework == "pt":
__lowerCamelCase = model_outputs.logits.softmax(-1 )[0]
__lowerCamelCase, __lowerCamelCase = probs.topk(UpperCamelCase_ )
else:
raise ValueError(F'Unsupported framework: {self.framework}' )
__lowerCamelCase = scores.tolist()
__lowerCamelCase = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(UpperCamelCase_ , UpperCamelCase_ )]
| 29
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class lowerCamelCase__( _UpperCAmelCase):
UpperCAmelCase__ : Union[str, Any] = "megatron-bert"
def __init__( self: Dict , UpperCamelCase_: Dict=2_90_56 , UpperCamelCase_: Optional[int]=10_24 , UpperCamelCase_: List[Any]=24 , UpperCamelCase_: Dict=16 , UpperCamelCase_: Optional[Any]=40_96 , UpperCamelCase_: Any="gelu" , UpperCamelCase_: Any=0.1 , UpperCamelCase_: Optional[Any]=0.1 , UpperCamelCase_: Optional[int]=5_12 , UpperCamelCase_: Union[str, Any]=2 , UpperCamelCase_: str=0.02 , UpperCamelCase_: Dict=1E-12 , UpperCamelCase_: List[Any]=0 , UpperCamelCase_: Union[str, Any]="absolute" , UpperCamelCase_: List[str]=True , **UpperCamelCase_: List[str] , ):
super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase )
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = hidden_act
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = initializer_range
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = position_embedding_type
__lowerCamelCase = use_cache
| 352
|
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
UpperCAmelCase_ = logging.get_logger(__name__)
@add_end_docstrings(__lowerCamelCase)
class lowerCamelCase__( __lowerCamelCase):
def __init__( self: List[Any] , *UpperCamelCase_: Dict , **UpperCamelCase_: Dict ):
super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
self.check_model_type(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: str=None , UpperCamelCase_: Optional[int]=None , UpperCamelCase_: Optional[int]=None , **UpperCamelCase_: List[Any] ):
__lowerCamelCase, __lowerCamelCase = {}, {}
if padding is not None:
__lowerCamelCase = padding
if truncation is not None:
__lowerCamelCase = truncation
if top_k is not None:
__lowerCamelCase = top_k
return preprocess_params, {}, postprocess_params
def __call__( self: Optional[Any] , UpperCamelCase_: Union["Image.Image", str] , UpperCamelCase_: str = None , **UpperCamelCase_: List[str] ):
if isinstance(UpperCamelCase_ , (Image.Image, str) ) and isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = {"""image""": image, """question""": question}
else:
__lowerCamelCase = image
__lowerCamelCase = super().__call__(UpperCamelCase_ , **UpperCamelCase_ )
return results
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: str , UpperCamelCase_: Any=False , UpperCamelCase_: Optional[int]=False ):
__lowerCamelCase = load_image(inputs["""image"""] )
__lowerCamelCase = self.tokenizer(
inputs["""question"""] , return_tensors=self.framework , padding=UpperCamelCase_ , truncation=UpperCamelCase_ )
__lowerCamelCase = self.image_processor(images=UpperCamelCase_ , return_tensors=self.framework )
model_inputs.update(UpperCamelCase_ )
return model_inputs
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: Tuple ):
__lowerCamelCase = self.model(**UpperCamelCase_ )
return model_outputs
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: Optional[int] , UpperCamelCase_: Optional[Any]=5 ):
if top_k > self.model.config.num_labels:
__lowerCamelCase = self.model.config.num_labels
if self.framework == "pt":
__lowerCamelCase = model_outputs.logits.sigmoid()[0]
__lowerCamelCase, __lowerCamelCase = probs.topk(UpperCamelCase_ )
else:
raise ValueError(F'Unsupported framework: {self.framework}' )
__lowerCamelCase = scores.tolist()
__lowerCamelCase = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(UpperCamelCase_ , UpperCamelCase_ )]
| 29
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCAmelCase_ = {
'configuration_graphormer': ['GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GraphormerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
'GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'GraphormerForGraphClassification',
'GraphormerModel',
'GraphormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 353
|
edges = {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []}
vertices = ['a', 'b', 'c', 'd', 'e']
def lowerCamelCase__ ( start: str , visited: list , sort: list ):
    '''simple docstring'''
    current = start
    # add current to visited
    visited.append(current )
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor , visited , sort )
    # if all neighbors visited add current to sort
    sort.append(current )
    # if all vertices haven't been visited select a new one to visit
    if len(visited ) != len(vertices ):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice , visited , sort )
    # return sort
    return sort
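# Editor's addition (not in the original): alias so the recursive calls above and
# the __main__ guard below resolve; for the graph above the post-order result is
# ['c', 'd', 'e', 'b', 'a'] (a reversed topological ordering).
topological_sort = lowerCamelCase__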
if __name__ == "__main__":
    sort = topological_sort('a', [], [])
    print(sort)
| 29
| 0
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'facebook/levit-128S': 'https://huggingface.co/facebook/levit-128S/resolve/main/config.json',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class lowerCamelCase__( a__):
UpperCAmelCase__ : int = 'levit'
def __init__( self: int , UpperCamelCase_: List[Any]=2_24 , UpperCamelCase_: Union[str, Any]=3 , UpperCamelCase_: int=3 , UpperCamelCase_: Tuple=2 , UpperCamelCase_: Optional[int]=1 , UpperCamelCase_: List[str]=16 , UpperCamelCase_: Tuple=[1_28, 2_56, 3_84] , UpperCamelCase_: List[str]=[4, 8, 12] , UpperCamelCase_: Optional[int]=[4, 4, 4] , UpperCamelCase_: Union[str, Any]=[16, 16, 16] , UpperCamelCase_: int=0 , UpperCamelCase_: Union[str, Any]=[2, 2, 2] , UpperCamelCase_: Optional[Any]=[2, 2, 2] , UpperCamelCase_: Optional[Any]=0.02 , **UpperCamelCase_: List[Any] , ):
super().__init__(**_lowerCamelCase )
__lowerCamelCase = image_size
__lowerCamelCase = num_channels
__lowerCamelCase = kernel_size
__lowerCamelCase = stride
__lowerCamelCase = padding
__lowerCamelCase = hidden_sizes
__lowerCamelCase = num_attention_heads
__lowerCamelCase = depths
__lowerCamelCase = key_dim
__lowerCamelCase = drop_path_rate
__lowerCamelCase = patch_size
__lowerCamelCase = attention_ratio
__lowerCamelCase = mlp_ratio
__lowerCamelCase = initializer_range
__lowerCamelCase = [
['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class lowerCamelCase__( a__):
UpperCAmelCase__ : int = version.parse('1.11')
@property
def lowerCAmelCase__ ( self: Optional[int] ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCAmelCase__ ( self: int ):
return 1E-4
| 354
|
import requests
from bs4 import BeautifulSoup
def lowerCamelCase__ ( A__ : str = "https://www.worldometers.info/coronavirus" ):
'''simple docstring'''
    soup = BeautifulSoup(requests.get(A__ ).text , """html.parser""" )
    keys = soup.findAll("""h1""" )
    values = soup.findAll("""div""" , {"""class""": """maincounter-number"""} )
    keys += soup.findAll("""span""" , {"""class""": """panel-title"""} )
    values += soup.findAll("""div""" , {"""class""": """number-table-main"""} )
    return {key.text.strip(): value.text.strip() for key, value in zip(keys , values )}
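# Editor's addition (not in the original): alias for the __main__ guard below.
# The returned dict maps headline labels scraped from the page to their counts;
# the exact keys depend on the live page layout.
world_covidaa_stats = lowerCamelCase__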
if __name__ == "__main__":
print('\033[1m' + 'COVID-19 Status of the World' + '\033[0m\n')
for key, value in world_covidaa_stats().items():
print(f"""{key}\n{value}\n""")
| 29
| 0
|
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
UpperCAmelCase_ = '%20'.join(argv[1:]) if len(argv) > 1 else quote(str(input('Search: ')))
print('Googling.....')
UpperCAmelCase_ = f"""https://www.google.com/search?q={query}&num=100"""
UpperCAmelCase_ = requests.get(
url,
headers={'User-Agent': str(UserAgent().random)},
)
try:
UpperCAmelCase_ = (
BeautifulSoup(res.text, 'html.parser')
.find('div', attrs={'class': 'yuRUbf'})
.find('a')
.get('href')
)
except AttributeError:
UpperCAmelCase_ = parse_qs(
BeautifulSoup(res.text, 'html.parser')
.find('div', attrs={'class': 'kCrYT'})
.find('a')
.get('href')
)['url'][0]
webbrowser.open(link)
| 355
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Union[str, Any] = 'yolos'
def __init__( self: Dict , UpperCamelCase_: List[Any]=7_68 , UpperCamelCase_: Tuple=12 , UpperCamelCase_: int=12 , UpperCamelCase_: int=30_72 , UpperCamelCase_: List[str]="gelu" , UpperCamelCase_: Union[str, Any]=0.0 , UpperCamelCase_: int=0.0 , UpperCamelCase_: Optional[int]=0.02 , UpperCamelCase_: Dict=1E-12 , UpperCamelCase_: List[Any]=[5_12, 8_64] , UpperCamelCase_: Optional[int]=16 , UpperCamelCase_: Any=3 , UpperCamelCase_: Union[str, Any]=True , UpperCamelCase_: List[str]=1_00 , UpperCamelCase_: List[str]=True , UpperCamelCase_: Any=False , UpperCamelCase_: Optional[Any]=1 , UpperCamelCase_: Any=5 , UpperCamelCase_: Any=2 , UpperCamelCase_: Tuple=5 , UpperCamelCase_: str=2 , UpperCamelCase_: Any=0.1 , **UpperCamelCase_: Any , ):
super().__init__(**UpperCamelCase_ )
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = initializer_range
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = image_size
__lowerCamelCase = patch_size
__lowerCamelCase = num_channels
__lowerCamelCase = qkv_bias
__lowerCamelCase = num_detection_tokens
__lowerCamelCase = use_mid_position_embeddings
__lowerCamelCase = auxiliary_loss
# Hungarian matcher
__lowerCamelCase = class_cost
__lowerCamelCase = bbox_cost
__lowerCamelCase = giou_cost
# Loss coefficients
__lowerCamelCase = bbox_loss_coefficient
__lowerCamelCase = giou_loss_coefficient
__lowerCamelCase = eos_coefficient
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Tuple = version.parse('1.11')
@property
def lowerCAmelCase__ ( self: Any ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCAmelCase__ ( self: Dict ):
return 1E-4
@property
def lowerCAmelCase__ ( self: Dict ):
return 12
| 29
| 0
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"""google/mobilenet_v2_1.4_224""": """https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json""",
"""google/mobilenet_v2_1.0_224""": """https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json""",
"""google/mobilenet_v2_0.75_160""": """https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json""",
"""google/mobilenet_v2_0.35_96""": """https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json""",
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class lowerCamelCase__( lowerCamelCase__):
UpperCAmelCase__ : Any = 'mobilenet_v2'
def __init__( self: List[Any] , UpperCamelCase_: Optional[int]=3 , UpperCamelCase_: List[str]=2_24 , UpperCamelCase_: Optional[Any]=1.0 , UpperCamelCase_: Tuple=8 , UpperCamelCase_: List[str]=8 , UpperCamelCase_: Tuple=6 , UpperCamelCase_: str=32 , UpperCamelCase_: List[Any]=True , UpperCamelCase_: List[Any]=True , UpperCamelCase_: Optional[Any]="relu6" , UpperCamelCase_: Optional[int]=True , UpperCamelCase_: Optional[Any]=0.8 , UpperCamelCase_: Union[str, Any]=0.02 , UpperCamelCase_: List[str]=0.001 , UpperCamelCase_: int=2_55 , **UpperCamelCase_: Union[str, Any] , ):
super().__init__(**UpperCamelCase_ )
if depth_multiplier <= 0:
raise ValueError("""depth_multiplier must be greater than zero.""" )
__lowerCamelCase = num_channels
__lowerCamelCase = image_size
__lowerCamelCase = depth_multiplier
__lowerCamelCase = depth_divisible_by
__lowerCamelCase = min_depth
__lowerCamelCase = expand_ratio
__lowerCamelCase = output_stride
__lowerCamelCase = first_layer_is_expansion
__lowerCamelCase = finegrained_output
__lowerCamelCase = hidden_act
__lowerCamelCase = tf_padding
__lowerCamelCase = classifier_dropout_prob
__lowerCamelCase = initializer_range
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = semantic_loss_ignore_index
class lowerCamelCase__( lowerCamelCase__):
UpperCAmelCase__ : Tuple = version.parse('1.11')
@property
def lowerCAmelCase__ ( self: List[Any] ):
return OrderedDict([("""pixel_values""", {0: """batch"""})] )
@property
def lowerCAmelCase__ ( self: Union[str, Any] ):
if self.task == "image-classification":
return OrderedDict([("""logits""", {0: """batch"""})] )
else:
return OrderedDict([("""last_hidden_state""", {0: """batch"""}), ("""pooler_output""", {0: """batch"""})] )
@property
def lowerCAmelCase__ ( self: Dict ):
return 1E-4
| 356
|
import os
from math import log10 as logaa
def lowerCamelCase__ ( A__ : str = "base_exp.txt" ):
'''simple docstring'''
    largest = 0.0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__ ) , A__ ) ) ):
        a, x = list(map(int , line.split(""",""" ) ) )
        if x * logaa(a ) > largest:
            largest = x * logaa(a )
            result = i + 1
    return result
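# Editor's addition (not in the original): alias for the __main__ guard below.
# Each line of base_exp.txt holds "a,x"; the function returns the 1-based line
# number maximising x * log10(a). The published Project Euler 99 answer is 709.
solution = lowerCamelCase__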
if __name__ == "__main__":
print(solution())
| 29
| 0
|
from __future__ import annotations
import time
UpperCAmelCase_ = list[tuple[int, int]]
UpperCAmelCase_ = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
UpperCAmelCase_ = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
class lowerCamelCase__:
def __init__( self: List[str] , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: Node | None ):
__lowerCamelCase = pos_x
__lowerCamelCase = pos_y
__lowerCamelCase = (pos_y, pos_x)
__lowerCamelCase = goal_x
__lowerCamelCase = goal_y
__lowerCamelCase = parent
class lowerCamelCase__:
def __init__( self: List[Any] , UpperCamelCase_: tuple[int, int] , UpperCamelCase_: tuple[int, int] ):
__lowerCamelCase = Node(start[1] , start[0] , goal[1] , goal[0] , _snake_case )
__lowerCamelCase = Node(goal[1] , goal[0] , goal[1] , goal[0] , _snake_case )
__lowerCamelCase = [self.start]
__lowerCamelCase = False
def lowerCAmelCase__ ( self: List[Any] ):
while self.node_queue:
__lowerCamelCase = self.node_queue.pop(0 )
if current_node.pos == self.target.pos:
__lowerCamelCase = True
return self.retrace_path(_snake_case )
__lowerCamelCase = self.get_successors(_snake_case )
for node in successors:
self.node_queue.append(_snake_case )
if not self.reached:
return [self.start.pos]
return None
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: Node ):
__lowerCamelCase = []
for action in delta:
__lowerCamelCase = parent.pos_x + action[1]
__lowerCamelCase = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(_snake_case ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(_snake_case , _snake_case , self.target.pos_y , self.target.pos_x , _snake_case ) )
return successors
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: Node | None ):
__lowerCamelCase = node
__lowerCamelCase = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
__lowerCamelCase = current_node.parent
path.reverse()
return path
class lowerCamelCase__:
def __init__( self: Dict , UpperCamelCase_: Optional[int] , UpperCamelCase_: Any ):
__lowerCamelCase = BreadthFirstSearch(_snake_case , _snake_case )
__lowerCamelCase = BreadthFirstSearch(_snake_case , _snake_case )
__lowerCamelCase = False
def lowerCAmelCase__ ( self: Optional[Any] ):
while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
__lowerCamelCase = self.fwd_bfs.node_queue.pop(0 )
__lowerCamelCase = self.bwd_bfs.node_queue.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
__lowerCamelCase = True
return self.retrace_bidirectional_path(
_snake_case , _snake_case )
__lowerCamelCase = current_bwd_node
__lowerCamelCase = current_fwd_node
__lowerCamelCase = {
self.fwd_bfs: self.fwd_bfs.get_successors(_snake_case ),
self.bwd_bfs: self.bwd_bfs.get_successors(_snake_case ),
}
for bfs in [self.fwd_bfs, self.bwd_bfs]:
for node in successors[bfs]:
bfs.node_queue.append(_snake_case )
if not self.reached:
return [self.fwd_bfs.start.pos]
return None
def lowerCAmelCase__ ( self: int , UpperCamelCase_: Node , UpperCamelCase_: Node ):
__lowerCamelCase = self.fwd_bfs.retrace_path(_snake_case )
__lowerCamelCase = self.bwd_bfs.retrace_path(_snake_case )
bwd_path.pop()
bwd_path.reverse()
__lowerCamelCase = fwd_path + bwd_path
return path
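# Editor's note (added; not in the original): meeting in the middle is what pays
# off here: with branching factor b and solution depth d, plain BFS touches
# O(b**d) nodes while the two frontiers above touch roughly O(b**(d / 2)) each.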
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
UpperCAmelCase_ = (0, 0)
UpperCAmelCase_ = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
UpperCAmelCase_ = time.time()
UpperCAmelCase_ = BreadthFirstSearch(init, goal)
UpperCAmelCase_ = bfs.search()
UpperCAmelCase_ = time.time() - start_bfs_time
print('Unidirectional BFS computation time : ', bfs_time)
UpperCAmelCase_ = time.time()
UpperCAmelCase_ = BidirectionalBreadthFirstSearch(init, goal)
UpperCAmelCase_ = bd_bfs.search()
UpperCAmelCase_ = time.time() - start_bd_bfs_time
print('Bidirectional BFS computation time : ', bd_bfs_time)
| 357
|
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def lowerCamelCase__ ( A__ : Tuple , A__ : Optional[int]=0.999 , A__ : Any="cosine" , ):
'''simple docstring'''
if alpha_transform_type == "cosine":
def alpha_bar_fn(A__ : Any ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(A__ : Optional[int] ):
return math.exp(t * -12.0 )
else:
raise ValueError(f'Unsupported alpha_tranform_type: {alpha_transform_type}' )
__lowerCamelCase = []
for i in range(A__ ):
__lowerCamelCase = i / num_diffusion_timesteps
__lowerCamelCase = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(A__ ) / alpha_bar_fn(A__ ) , A__ ) )
return torch.tensor(A__ , dtype=torch.floataa )
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase):
UpperCAmelCase__ : List[str] = [e.name for e in KarrasDiffusionSchedulers]
UpperCAmelCase__ : Any = 2
@register_to_config
def __init__( self: List[str] , UpperCamelCase_: int = 10_00 , UpperCamelCase_: float = 0.0_0085 , UpperCamelCase_: float = 0.012 , UpperCamelCase_: str = "linear" , UpperCamelCase_: Optional[Union[np.ndarray, List[float]]] = None , UpperCamelCase_: str = "epsilon" , UpperCamelCase_: str = "linspace" , UpperCamelCase_: int = 0 , ):
if trained_betas is not None:
__lowerCamelCase = torch.tensor(UpperCamelCase_ , dtype=torch.floataa )
elif beta_schedule == "linear":
__lowerCamelCase = torch.linspace(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__lowerCamelCase = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , UpperCamelCase_ , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__lowerCamelCase = betas_for_alpha_bar(UpperCamelCase_ )
else:
            raise NotImplementedError(F'{beta_schedule} is not implemented for {self.__class__}' )
__lowerCamelCase = 1.0 - self.betas
__lowerCamelCase = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: int , UpperCamelCase_: List[str] , UpperCamelCase_: Optional[Any]=None ):
if schedule_timesteps is None:
__lowerCamelCase = self.timesteps
__lowerCamelCase = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
__lowerCamelCase = 1 if len(UpperCamelCase_ ) > 1 else 0
else:
__lowerCamelCase = timestep.cpu().item() if torch.is_tensor(UpperCamelCase_ ) else timestep
__lowerCamelCase = self._index_counter[timestep_int]
return indices[pos].item()
@property
def lowerCAmelCase__ ( self: Optional[int] ):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: Union[float, torch.FloatTensor] , ):
__lowerCamelCase = self.index_for_timestep(UpperCamelCase_ )
if self.state_in_first_order:
__lowerCamelCase = self.sigmas[step_index]
else:
__lowerCamelCase = self.sigmas_interpol[step_index]
__lowerCamelCase = sample / ((sigma**2 + 1) ** 0.5)
return sample
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: int , UpperCamelCase_: Union[str, torch.device] = None , UpperCamelCase_: Optional[int] = None , ):
__lowerCamelCase = num_inference_steps
__lowerCamelCase = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
__lowerCamelCase = np.linspace(0 , num_train_timesteps - 1 , UpperCamelCase_ , dtype=UpperCamelCase_ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
__lowerCamelCase = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__lowerCamelCase = (np.arange(0 , UpperCamelCase_ ) * step_ratio).round()[::-1].copy().astype(UpperCamelCase_ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
__lowerCamelCase = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__lowerCamelCase = (np.arange(UpperCamelCase_ , 0 , -step_ratio )).round().copy().astype(UpperCamelCase_ )
timesteps -= 1
else:
raise ValueError(
F'{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.' )
__lowerCamelCase = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
__lowerCamelCase = torch.from_numpy(np.log(UpperCamelCase_ ) ).to(UpperCamelCase_ )
__lowerCamelCase = np.interp(UpperCamelCase_ , np.arange(0 , len(UpperCamelCase_ ) ) , UpperCamelCase_ )
__lowerCamelCase = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
__lowerCamelCase = torch.from_numpy(UpperCamelCase_ ).to(device=UpperCamelCase_ )
# interpolate sigmas
__lowerCamelCase = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
__lowerCamelCase = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
__lowerCamelCase = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(UpperCamelCase_ ).startswith("""mps""" ):
# mps does not support float64
__lowerCamelCase = torch.from_numpy(UpperCamelCase_ ).to(UpperCamelCase_ , dtype=torch.floataa )
else:
__lowerCamelCase = torch.from_numpy(UpperCamelCase_ ).to(UpperCamelCase_ )
# interpolate timesteps
__lowerCamelCase = self.sigma_to_t(UpperCamelCase_ ).to(UpperCamelCase_ , dtype=timesteps.dtype )
__lowerCamelCase = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
__lowerCamelCase = torch.cat([timesteps[:1], interleaved_timesteps] )
__lowerCamelCase = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
__lowerCamelCase = defaultdict(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: str ):
# get log sigma
__lowerCamelCase = sigma.log()
# get distribution
__lowerCamelCase = log_sigma - self.log_sigmas[:, None]
# get sigmas range
__lowerCamelCase = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
__lowerCamelCase = low_idx + 1
__lowerCamelCase = self.log_sigmas[low_idx]
__lowerCamelCase = self.log_sigmas[high_idx]
# interpolate sigmas
__lowerCamelCase = (low - log_sigma) / (low - high)
__lowerCamelCase = w.clamp(0 , 1 )
# transform interpolation to time range
__lowerCamelCase = (1 - w) * low_idx + w * high_idx
__lowerCamelCase = t.view(sigma.shape )
return t
@property
def lowerCAmelCase__ ( self: Dict ):
return self.sample is None
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Union[torch.FloatTensor, np.ndarray] , UpperCamelCase_: Union[float, torch.FloatTensor] , UpperCamelCase_: Union[torch.FloatTensor, np.ndarray] , UpperCamelCase_: bool = True , ):
__lowerCamelCase = self.index_for_timestep(UpperCamelCase_ )
# advance index counter by 1
__lowerCamelCase = timestep.cpu().item() if torch.is_tensor(UpperCamelCase_ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
__lowerCamelCase = self.sigmas[step_index]
__lowerCamelCase = self.sigmas_interpol[step_index + 1]
__lowerCamelCase = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
__lowerCamelCase = self.sigmas[step_index - 1]
__lowerCamelCase = self.sigmas_interpol[step_index]
__lowerCamelCase = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
__lowerCamelCase = 0
__lowerCamelCase = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
__lowerCamelCase = sigma_hat if self.state_in_first_order else sigma_interpol
__lowerCamelCase = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
__lowerCamelCase = sigma_hat if self.state_in_first_order else sigma_interpol
__lowerCamelCase = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError("""prediction_type not implemented yet: sample""" )
else:
raise ValueError(
F'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`' )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
__lowerCamelCase = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
__lowerCamelCase = sigma_interpol - sigma_hat
# store for 2nd order step
__lowerCamelCase = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
__lowerCamelCase = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
__lowerCamelCase = sigma_next - sigma_hat
__lowerCamelCase = self.sample
__lowerCamelCase = None
__lowerCamelCase = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: torch.FloatTensor , ):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
__lowerCamelCase = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(UpperCamelCase_ ):
# mps does not support float64
__lowerCamelCase = self.timesteps.to(original_samples.device , dtype=torch.floataa )
__lowerCamelCase = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
__lowerCamelCase = self.timesteps.to(original_samples.device )
__lowerCamelCase = timesteps.to(original_samples.device )
__lowerCamelCase = [self.index_for_timestep(UpperCamelCase_ , UpperCamelCase_ ) for t in timesteps]
__lowerCamelCase = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
__lowerCamelCase = sigma.unsqueeze(-1 )
__lowerCamelCase = original_samples + noise * sigma
return noisy_samples
def __len__( self: Tuple ):
return self.config.num_train_timesteps
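# Editor's usage sketch (added; not in the original): this scheduler appears to
# correspond to diffusers' KDPM2DiscreteScheduler; swapping it into a pipeline
# (the model id is illustrative):
#
#     from diffusers import DiffusionPipeline, KDPM2DiscreteScheduler
#     pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#     pipe.scheduler = KDPM2DiscreteScheduler.from_config(pipe.scheduler.config)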
| 29
| 0
|
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
UpperCAmelCase_ = logging.get_logger(__name__)
def lowerCamelCase__ ( A__ : int , A__ : Union[str, Any] ):
'''simple docstring'''
__lowerCamelCase = set()
__lowerCamelCase = []
def parse_line(A__ : Union[str, Any] ):
for line in fp:
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
__lowerCamelCase = line.decode("""UTF-8""" )
if "warnings summary (final)" in line:
continue
# This means we are outside the body of a warning
elif not line.startswith(""" """ ):
# process a single warning and move it to `selected_warnings`.
if len(lowerCamelCase_ ) > 0:
__lowerCamelCase = """\n""".join(lowerCamelCase_ )
# Only keep the warnings specified in `targets`
if any(f': {x}: ' in warning for x in targets ):
selected_warnings.add(lowerCamelCase_ )
buffer.clear()
continue
else:
__lowerCamelCase = line.strip()
buffer.append(lowerCamelCase_ )
if from_gh:
for filename in os.listdir(lowerCamelCase_ ):
__lowerCamelCase = os.path.join(lowerCamelCase_ , lowerCamelCase_ )
if not os.path.isdir(lowerCamelCase_ ):
# read the file
if filename != "warnings.txt":
continue
with open(lowerCamelCase_ ) as fp:
parse_line(lowerCamelCase_ )
else:
try:
with zipfile.ZipFile(lowerCamelCase_ ) as z:
for filename in z.namelist():
if not os.path.isdir(lowerCamelCase_ ):
# read the file
if filename != "warnings.txt":
continue
with z.open(lowerCamelCase_ ) as fp:
parse_line(lowerCamelCase_ )
except Exception:
logger.warning(
f'{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.' )
return selected_warnings
def lowerCamelCase__ ( A__ : Dict , A__ : str ):
'''simple docstring'''
__lowerCamelCase = set()
__lowerCamelCase = [os.path.join(lowerCamelCase_ , lowerCamelCase_ ) for p in os.listdir(lowerCamelCase_ ) if (p.endswith(""".zip""" ) or from_gh)]
for p in paths:
selected_warnings.update(extract_warnings_from_single_artifact(lowerCamelCase_ , lowerCamelCase_ ) )
return selected_warnings
if __name__ == "__main__":

    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    # optional parameters
    parser.add_argument(
        "--targets",
        default="DeprecationWarning,UserWarning,FutureWarning",
        type=list_str,
        help="Comma-separated list of target warning(s) which we want to extract.",
    )
    parser.add_argument(
        "--from_gh",
        action="store_true",
        help="If running from a GitHub action workflow and collecting warnings from its artifacts.",
    )

    args = parser.parse_args()

    from_gh = args.from_gh
    if from_gh:
        # The artifacts have to be downloaded using `actions/download-artifact@v3`
        pass
    else:
        os.makedirs(args.output_dir, exist_ok=True)

        # get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
        with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
            json.dump(artifacts, fp, ensure_ascii=False, indent=4)

        # download artifacts
        for idx, (name, url) in enumerate(artifacts.items()):
            print(name)
            print(url)
            print("=" * 80)
            download_artifact(name, url, args.output_dir, args.token)
            # Be gentle to GitHub
            time.sleep(1)

    # extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
    with open(os.path.join(args.output_dir, "selected_warnings.json"), "w", encoding="UTF-8") as fp:
        json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
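
# A usage sketch: the run id and token below are placeholders, not real values.
# Given a completed CI workflow, this collects every `warnings.txt` from its
# artifacts and writes the deduplicated, sorted warnings to `selected_warnings.json`:
#
#   python extract_warnings.py \
#       --workflow_run_id 1234567890 \
#       --output_dir ci_artifacts \
#       --targets DeprecationWarning,UserWarning \
#       --token $GITHUB_TOKEN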
import random
import unittest
import torch
from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
import re
from filelock import FileLock
try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Split the input into sentences, one per line, as expected by rougeLsum scoring."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
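
# A minimal usage sketch: a two-sentence summary comes back with one sentence
# per line, which is the format rougeLsum expects.
#
#   >>> add_newline_to_end_of_each_sentence("Hello there. General Kenobi!")
#   'Hello there.\nGeneral Kenobi!'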
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}


class ImageEncoder(nn.Module):
    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, x):
        # Bx3xWxH -> BxNx2048: pooled region features for the multimodal model
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048
class JsonlDataset(Dataset):
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(l) for l in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length

        self.transforms = transforms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1

        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)

        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs
def collate_fn(batch):
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)

    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)

    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])

    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
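
# A usage sketch (the file name `train.jsonl` and batch size are assumptions):
# `collate_fn` pads each batch to its longest sentence and returns aligned
# text/mask/image tensors, so it plugs straight into a DataLoader.
#
#   from torch.utils.data import DataLoader
#
#   dataset = JsonlDataset("train.jsonl", tokenizer, get_image_transforms(),
#                          get_mmimdb_labels(), max_seq_length=512)
#   loader = DataLoader(dataset, batch_size=8, collate_fn=collate_fn)
#   text, mask, img, img_start, img_end, tgt = next(iter(loader))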
def get_mmimdb_labels():
    """Label vocabulary of the MM-IMDB genre classification task."""
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms():
    """Standard ImageNet-style preprocessing matching the pretrained image encoder."""
return transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46_777_044, 0.44_531_429, 0.40_661_017] , std=[0.12_221_994, 0.12_145_835, 0.14_380_469] , ),
] )
from __future__ import annotations
UpperCAmelCase_ = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        """Run breadth first search from the source vertex, filling `self.parent`."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Return the shortest path `v1->v2->...->target_vertex` from the source,
        or raise ValueError if no path exists.
        """
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breath_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))
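
# Expected behaviour for the adjacency list above, traced by hand from the BFS
# parent map (parent[C]=G, parent[A]=C, parent[B]=A, parent[D]=B):
#
#   g.shortest_path("D")    # 'G->C->A->B->D'
#   g.shortest_path("G")    # 'G'
#   g.shortest_path("Foo")  # raises ValueError: no path from G to Foo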
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        # the dataclass is frozen, so bypass __setattr__ when updating the copy
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
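
# A usage sketch (the sampling rate is illustrative): `align_with_features`
# returns a copy of the template whose input schema matches the dataset's
# actual audio column.
#
#   features = Features({"audio": Audio(sampling_rate=16_000), "transcription": Value("string")})
#   template = AutomaticSpeechRecognition()
#   aligned = template.align_with_features(features)
#   assert aligned.input_schema["audio"].sampling_rate == 16_000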
from math import ceil, sqrt
def solution(limit: int = 1000000) -> int:
    """
    Count the square laminae (hollow square frames) that can be formed
    using up to `limit` tiles (Project Euler problem 173).
    """
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        # the hole width must have the same parity as the outer width
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1

        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1

    return answer
if __name__ == "__main__":
print(f"""{solution() = }""")
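
# A quick sanity check against the example given in the Project Euler 173
# statement: with up to one hundred tiles, forty-one laminae can be formed
# (tile count per lamina is outer_width**2 - hole_width**2, parities matching).
#
#   assert solution(100) == 41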
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
    CONFIG_MAPPING,
    FEATURE_EXTRACTOR_MAPPING,
    AutoConfig,
    AutoFeatureExtractor,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_CONFIG = get_tests_dir("fixtures/dummy-config.json")
class AutoFeatureExtractorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_feature_extractor_from_model_shortcut(self):
        config = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()

            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR).to_dict()
            config_dict.pop("feature_extractor_type")
            config = Wav2Vec2FeatureExtractor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoFeatureExtractor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_file(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoFeatureExtractor.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR, revision="aaaaaa")
    def test_feature_extractor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model")
    def test_from_pretrained_dynamic_feature_extractor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )

        feature_extractor = AutoFeatureExtractor.from_pretrained(
            "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
        )
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(tmp_dir)
            reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_feature_extractor.__class__.__name__, "NewFeatureExtractor")
    def test_new_feature_extractor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoFeatureExtractor.register(Wav2Vec2Config, Wav2Vec2FeatureExtractor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)

            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(tmp_dir)
                new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_feature_extractor, CustomFeatureExtractor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_feature_extractor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            # If remote code is not set, the default is to use local
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote code is disabled, we load the local one.
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote is enabled, we load from the Hub
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(not hasattr(feature_extractor, "is_local"))

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
import flax.linen as nn
import jax.numpy as jnp

from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D


class FlaxCrossAttnDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()

        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states


class FlaxDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()

        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states


class FlaxCrossAttnUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states


class FlaxUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states


class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
        ]

        attentions = []

        for _ in range(self.num_layers):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels,
                n_heads=self.num_attention_heads,
                d_head=self.in_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        return hidden_states
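
# A shape sketch (the channel sizes and embedding dims below are assumptions,
# not values from this file): the cross-attention down block consumes NHWC
# features plus a time embedding and encoder hidden states, and halves the
# spatial resolution when `add_downsample=True`.
#
#   import jax
#
#   block = FlaxCrossAttnDownBlock2D(in_channels=32, out_channels=64, num_attention_heads=4)
#   sample = jnp.zeros((1, 8, 8, 32))   # NHWC input features
#   temb = jnp.zeros((1, 128))          # time embedding
#   context = jnp.zeros((1, 77, 32))    # encoder hidden states (e.g. text)
#   params = block.init(jax.random.PRNGKey(0), sample, temb, context)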
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(
        self,
        image: Union[str, "Image.Image", List[Dict[str, Any]]],
        candidate_labels: Union[str, List[str]] = None,
        **kwargs,
    ):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")

        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")

        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")

        outputs = self.model(**model_inputs)

        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
            )[0]

            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])

                result = {"score": score, "label": label, "box": box}
                results.append(result)

        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]

        return results

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        """Turn a list [xmin, ymin, xmax, ymax] into a dict { "xmin": xmin, ... }."""
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
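
# A usage sketch (the image URL is illustrative; any OWL-ViT style zero-shot
# detection checkpoint works the same way):
#
#   from transformers import pipeline
#
#   detector = pipeline(model="google/owlvit-base-patch32", task="zero-shot-object-detection")
#   detector(
#       "http://images.cocodataset.org/val2017/000000039769.jpg",
#       candidate_labels=["cat", "remote control"],
#   )
#   # [{'score': ..., 'label': 'cat', 'box': {'xmin': ..., 'ymin': ..., 'xmax': ..., 'ymax': ...}}, ...]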
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"

if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["source_spm"])
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["target_spm"])

        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 9)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)

    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de")
        batch = en_de_tokenizer(["I am a small frog"], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(expected, batch.input_ids[0])

        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob("*")]
        self.assertIn("source.spm", contents)
        MarianTokenizer.from_pretrained(save_dir)

    def test_outputs_not_longer_than_maxlen(self):
        tok = self.get_tokenizer()

        batch = tok(
            ["I am a small frog" * 1000, "I am a small frog"], padding=True, truncation=True, return_tensors=FRAMEWORK
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))

    def test_outputs_can_be_shorter(self):
        tok = self.get_tokenizer()
        batch_smaller = tok(["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
__lowerCamelCase = {"""input_ids""": [[4_34_95, 4_62, 20, 4_21_64, 13_69, 52, 4_64, 1_32, 17_03, 4_92, 13, 74_91, 3_89_99, 6, 8, 4_64, 1_32, 17_03, 4_92, 13, 46_69, 3_78_67, 13, 75_25, 27, 15_93, 9_88, 13, 3_39_72, 70_29, 6, 20, 82_51, 3_83, 2, 2_70, 58_66, 37_88, 2, 23_53, 82_51, 1_23_38, 2, 1_39_58, 3_87, 2, 36_29, 69_53, 1_88, 29_00, 2, 1_39_58, 80_11, 1_15_01, 23, 84_60, 40_73, 3_40_09, 20, 4_35, 1_14_39, 27, 8, 84_60, 40_73, 60_04, 20, 99_88, 3_75, 27, 33, 2_66, 19_45, 10_76, 13_50, 3_78_67, 32_88, 5, 5_77, 10_76, 43_74, 8, 50_82, 5, 2_64_53, 2_57, 5_56, 4_03, 2, 2_42, 1_32, 3_83, 3_16, 4_92, 8, 1_07_67, 6, 3_16, 3_04, 42_39, 3, 0], [1_48, 1_57_22, 19, 18_39, 12, 13_50, 13, 2_23_27, 50_82, 54_18, 4_75_67, 3_59_38, 59, 3_18, 1_95_52, 1_08, 21_83, 54, 1_49_76, 48_35, 32, 5_47, 11_14, 8, 3_15, 24_17, 5, 92, 1_90_88, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00], [36, 63_95, 1_25_70, 3_91_47, 1_15_97, 6, 2_66, 4, 4_54_05, 72_96, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        expected_encoding = __lowerCamelCase  # readable alias for the verbatim expected-encoding table above
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="Helsinki-NLP/opus-mt-en-de",
            revision="1a8c2263da11e68e50938f97e10cd57820bd504c",
            decode_kwargs={"use_source_tokenizer": True},
        )
    def test_tokenizer_decode(self):
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")

        source_text = "Tämä on testi"
        target_text = "This is a test"

        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]

        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(src_ids, expected_src_ids)

        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(target_ids, expected_target_ids)

        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None

        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision
        )
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
            [17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
            [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
            [3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
            # fmt: on
        ]
    )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
            [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
            [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
            [3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
            # fmt: on
        ]
    )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest(unittest.TestCase):
    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config(self, config_name):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)

        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])

        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)

    def test_from_model_config(self):
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()

        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)

        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)

    def test_update(self):
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)

        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)

        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1024)

        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})

    def test_initialize_new_kwargs(self):
        generation_config = GenerationConfig()
        generation_config.foo = "bar"

        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)

            new_config = GenerationConfig.from_pretrained(tmp_dir)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo, "bar")

        generation_config = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config, "foo")  # no new kwargs should be initialized if from config

    def test_kwarg_init(self):
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)

        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)

        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)

        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-generation-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("test-generation-config", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
        ),
    },
    "spm_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
        )
    },
}

MAX_MODEL_INPUT_SIZES = {
    "facebook/s2t-small-librispeech-asr": 1024,
}

MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]

LANGUAGES = {"mustc": MUSTC_LANGS}
class lowerCamelCase__( __SCREAMING_SNAKE_CASE):
UpperCAmelCase__ : Tuple = VOCAB_FILES_NAMES
UpperCAmelCase__ : Any = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : List[Any] = MAX_MODEL_INPUT_SIZES
UpperCAmelCase__ : str = ["input_ids", "attention_mask"]
UpperCAmelCase__ : List[int] = []
def __init__( self: Any , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: int , UpperCamelCase_: List[str]="<s>" , UpperCamelCase_: Tuple="</s>" , UpperCamelCase_: Optional[int]="<pad>" , UpperCamelCase_: Union[str, Any]="<unk>" , UpperCamelCase_: Optional[Any]=False , UpperCamelCase_: List[str]=False , UpperCamelCase_: Dict=None , UpperCamelCase_: Tuple=None , UpperCamelCase_: List[Any] = None , **UpperCamelCase_: List[str] , ):
__lowerCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , do_upper_case=__UpperCAmelCase , do_lower_case=__UpperCAmelCase , tgt_lang=__UpperCAmelCase , lang_codes=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , )
__lowerCamelCase = do_upper_case
__lowerCamelCase = do_lower_case
__lowerCamelCase = load_json(__UpperCAmelCase )
__lowerCamelCase = {v: k for k, v in self.encoder.items()}
__lowerCamelCase = spm_file
__lowerCamelCase = load_spm(__UpperCAmelCase , self.sp_model_kwargs )
if lang_codes is not None:
__lowerCamelCase = lang_codes
__lowerCamelCase = LANGUAGES[lang_codes]
__lowerCamelCase = [F'<lang:{lang}>' for lang in self.langs]
__lowerCamelCase = {lang: self.sp_model.PieceToId(F'<lang:{lang}>' ) for lang in self.langs}
__lowerCamelCase = self.lang_tokens
__lowerCamelCase = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
__lowerCamelCase = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    @property
    def tgt_lang(self) -> str:
        return self._tgt_lang

    @tgt_lang.setter
    def tgt_lang(self, new_tgt_lang) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        decoded = self.sp_model.decode(current_sub_tokens)
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
    def get_vocab(self) -> Dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        assert save_dir.is_dir(), f'{save_directory} should be a directory'
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )
        save_json(self.encoder, vocab_save_path)
        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (str(vocab_save_path), str(spm_save_path))
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm

def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)

def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
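# A minimal standalone sketch of the prefix/suffix pattern implemented by
# build_inputs_with_special_tokens above. The ids are hypothetical (9 standing
# in for a <lang:...> prefix id, 2 for </s>); this is an illustration, not the
# transformers API itself.
_prefix_tokens = [9]  # hypothetical id installed by set_tgt_lang_special_tokens
_eos_token_id = 2     # hypothetical </s> id

def _build_inputs(token_ids, pair_ids=None):
    if pair_ids is None:
        return _prefix_tokens + token_ids + [_eos_token_id]
    return _prefix_tokens + token_ids + pair_ids + [_eos_token_id]

assert _build_inputs([5, 6, 7]) == [9, 5, 6, 7, 2]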
| 366
|
def cocktail_shaker_sort(unsorted: list) -> list:
    """Sort a list in place with cocktail shaker (bidirectional bubble) sort and return it."""
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ = input('Enter numbers separated by a comma:\n').strip()
UpperCAmelCase_ = [int(item) for item in user_input.split(',')]
print(f"""{cocktail_shaker_sort(unsorted) = }""")
| 29
| 0
|
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
        'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'xlnet-base-cased': None,
    'xlnet-large-cased': None,
}
# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = 'left'
    def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace('``', '"').replace('\'\'', '"')
        if not self.keep_accents:
            outputs = unicodedata.normalize('NFKD', outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize(self, text: str):
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(',') and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ''))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces
    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, ' ').strip()
        return out_string
    def _decode(self, token_ids: List[int], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = None, spaces_between_special_tokens: bool = True, **kwargs, ):
        self._decode_use_source_tokenizer = kwargs.pop('use_source_tokenizer', False)
        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)
        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))
        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = "".join(sub_texts)
        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
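# Standalone sketch of the XLNet segment-id layout built by
# create_token_type_ids_from_sequences above: sequence A and its <sep> get
# segment 0, sequence B and its <sep> get segment 1, and the trailing <cls>
# carries the special segment id 2. Token/id values below are hypothetical.
def _token_type_ids(ids_a, ids_b=None, sep=(3,), cls_segment_id=(2,)):
    if ids_b is None:
        return len(ids_a + list(sep)) * [0] + list(cls_segment_id)
    return len(ids_a + list(sep)) * [0] + len(ids_b + list(sep)) * [1] + list(cls_segment_id)

assert _token_type_ids([7, 8], [9]) == [0, 0, 0, 1, 1, 2]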
| 367
|
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f'If set, {key} must be yes or no.')
    return _value

_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
def skip(test_case):
    return unittest.skip("Test was skipped")(test_case)

def slow(test_case):
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)

def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)

def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)

def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)

def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)

def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite")(test_case)

def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)

def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)

def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)

def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)

def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)

def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)

def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)

def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)

def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)

def require_torch_min_version(test_case=None, version=None):
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f'test requires torch version >= {version}')(test_case)

def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)

def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)

def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)

_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)

def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available, "test requires at least one tracker to be available and for `comet_ml` to not be installed", )(test_case)
class TempDirTestCase(unittest.TestCase):
    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)

class AccelerateTestCase(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()

class MockingTestCase(unittest.TestCase):
    def add_mocks(self, mocks: Union[mock.Mock, List[mock.Mock]]):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def are_the_same_tensors(tensor):
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env, )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))),
        ], timeout=timeout, )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo))
    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f'\'{cmd_str}\' failed with returncode {result.returncode}\n\n'
            f'The combined stderr from workers follows:\n{stderr}')
    return result
class SubprocessCallException(Exception):
    pass
def run_command(command, return_stdout=False):
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
        return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f'Command `{" ".join(command)}` failed with the following error:\n\n{e.output.decode()}') from e
| 29
| 0
|
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class TFFunnelModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, block_sizes=[1, 1, 2], num_decoder_layers=1, d_model=32, n_head=4, d_head=8, d_inner=37, hidden_act="gelu_new", hidden_dropout=0.1, attention_dropout=0.1, activation_dropout=0.0, max_position_embeddings=5_12, type_vocab_size=3, initializer_std=0.02, num_labels=3, num_choices=4, scope=None, base=False, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = 2
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.initializer_std = initializer_std
        # Used in the tests to check the size of the first attention layer
        self.num_attention_heads = n_head
        # Used in the tests to check the size of the first hidden state
        self.hidden_size = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        self.num_hidden_layers = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            self.expected_num_hidden_layers = self.num_hidden_layers + 2
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = FunnelConfig(
            vocab_size=self.vocab_size, block_sizes=self.block_sizes, num_decoder_layers=self.num_decoder_layers, d_model=self.d_model, n_head=self.n_head, d_head=self.d_head, d_inner=self.d_inner, hidden_act=self.hidden_act, hidden_dropout=self.hidden_dropout, attention_dropout=self.attention_dropout, activation_dropout=self.activation_dropout, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_std=self.initializer_std, )
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ):
        model = TFFunnelModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))
        config.truncate_seq = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))
        config.separate_cls = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))
    def create_and_check_base_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ):
        model = TFFunnelBaseModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))
        config.truncate_seq = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 3, self.d_model))
        config.separate_cls = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))
    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ):
        model = TFFunnelForPreTraining(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ):
        model = TFFunnelForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ):
        config.num_labels = self.num_labels
        model = TFFunnelForSequenceClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ):
        config.num_choices = self.num_choices
        model = TFFunnelForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ):
        config.num_labels = self.num_labels
        model = TFFunnelForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ):
        model = TFFunnelForQuestionAnswering(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFFunnelModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': (TFFunnelBaseModel, TFFunnelModel),
'fill-mask': TFFunnelForMaskedLM,
'question-answering': TFFunnelForQuestionAnswering,
'text-classification': TFFunnelForSequenceClassification,
'token-classification': TFFunnelForTokenClassification,
'zero-shot': TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFFunnelModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
@require_tf
class TFFunnelBaseModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFFunnelModelTester(self, base=True)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_base_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
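# Sketch of the multiple-choice tiling used in create_and_check_for_multiple_choice
# above: each (batch, seq) tensor is repeated along a new num_choices axis.
# Requires TensorFlow; the shapes below are illustrative.
import tensorflow as tf

_ids = tf.constant([[1, 2, 3]])                        # (batch=1, seq=3)
_tiled = tf.tile(tf.expand_dims(_ids, 1), (1, 4, 1))   # -> (batch, num_choices=4, seq)
assert _tiled.shape == (1, 4, 3)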
| 368
|
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)
class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    drop_labels: bool = None
    drop_metadata: bool = None
class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = 'audio'
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column='audio', label_column='label')
AUDIO_EXTENSIONS = [
'.aiff',
'.au',
'.avr',
'.caf',
'.flac',
'.htk',
'.svx',
'.mat4',
'.mat5',
'.mpc2k',
'.ogg',
'.paf',
'.pvf',
'.raw',
'.rf64',
'.sd2',
'.sds',
'.ircam',
'.voc',
'.w64',
'.wav',
'.nist',
'.wavex',
'.wve',
'.xi',
'.mp3',
'.opus',
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
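# Hedged sketch of how a folder-based builder can use an extension list like
# the one above to decide which files belong to the dataset. This is a
# simplification, not the datasets-internal logic; only a subset of
# extensions is shown.
from pathlib import Path

def _is_audio_file(filename: str) -> bool:
    return Path(filename).suffix.lower() in {".wav", ".flac", ".mp3", ".ogg", ".opus"}

assert _is_audio_file("clip.WAV") and not _is_audio_file("notes.txt")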
| 29
| 0
|
__author__ = "Tobias Carryer"
from time import time

class LinearCongruentialGenerator:
    def __init__(self, multiplier, increment, modulo, seed=int(time())):  # noqa: B008
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def next_number(self):
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed
if __name__ == "__main__":
# Show the LCG in action.
UpperCAmelCase_ = LinearCongruentialGenerator(1_664_525, 1_013_904_223, 2 << 31)
while True:
print(lcg.next_number())
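# The constants used above (1664525, 1013904223, 2 << 31 == 2**32) are the
# classic "Numerical Recipes" LCG parameters. A hedged determinism check:
# two generators with the same seed must produce identical streams.
_a = LinearCongruentialGenerator(1_664_525, 1_013_904_223, 2 << 31, seed=42)
_b = LinearCongruentialGenerator(1_664_525, 1_013_904_223, 2 << 31, seed=42)
assert [_a.next_number() for _ in range(5)] == [_b.next_number() for _ in range(5)]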
| 369
|
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'nvidia/segformer-b0-finetuned-ade-512-512': (
        'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'
    ),
    # See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    model_type = 'segformer'

    def __init__(self, num_channels=3, num_encoder_blocks=4, depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], hidden_sizes=[32, 64, 1_60, 2_56], patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], num_attention_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4], hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, classifier_dropout_prob=0.1, initializer_range=0.02, drop_path_rate=0.1, layer_norm_eps=1E-6, decoder_hidden_size=2_56, semantic_loss_ignore_index=2_55, **kwargs, ):
        super().__init__(**kwargs)
        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.", FutureWarning, )
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ])

    @property
    def atol_for_validation(self) -> float:
        return 1E-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
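# Hedged sketch of the per-stage invariant implied by the defaults above:
# every per-stage list has exactly num_encoder_blocks entries.
_num_encoder_blocks = 4
_per_stage = {
    "depths": [2, 2, 2, 2],
    "sr_ratios": [8, 4, 2, 1],
    "hidden_sizes": [32, 64, 160, 256],
    "patch_sizes": [7, 3, 3, 3],
    "strides": [4, 2, 2, 2],
    "num_attention_heads": [1, 2, 5, 8],
    "mlp_ratios": [4, 4, 4, 4],
}
assert all(len(v) == _num_encoder_blocks for v in _per_stage.values())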
| 29
| 0
|
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
logger = logging.get_logger(__name__)

class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(f'Provided path ({save_directory}) should be a directory, not a file')
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)
        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer")
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer")
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(self, src_texts: List[str], tgt_texts: Optional[List[str]] = None, max_length: Optional[int] = None, max_target_length: Optional[int] = None, padding: str = "longest", return_tensors: str = None, truncation: bool = True, **kwargs, ) -> BatchEncoding:
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details", FutureWarning, )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts, add_special_tokens=True, return_tensors=return_tensors, max_length=max_length, padding=padding, truncation=truncation, **kwargs, )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts, add_special_tokens=True, return_tensors=return_tensors, padding=padding, max_length=max_target_length, truncation=truncation, **kwargs, )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
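# Minimal routing sketch for RagTokenizer above, using stand-in callables in
# place of real tokenizers (purely illustrative):
_tok = RagTokenizer(question_encoder=lambda text: f"qe:{text}", generator=lambda text: f"gen:{text}")
assert _tok("hello") == "qe:hello"  # __call__ routes through the current (question encoder) tokenizer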
| 370
|
import string

import numpy

def greatest_common_divisor(a: int, b: int) -> int:
    return b if a == 0 else greatest_common_divisor(b % a, a)

class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters
    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)
    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key: numpy.ndarray) -> None:
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f'determinant modular {req_l} of encryption key({det}) '
                f'is not co prime w.r.t {req_l}.\nTry another key.'
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        chars = [char for char in text.upper() if char in self.key_string]
        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)
        return "".join(chars)

    def encrypt(self, text: str) -> str:
        text = self.process_text(text.upper())
        encrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[
                0
            ]
            encrypted_batch = "".join(
                self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch
        return encrypted

    def make_decrypt_key(self) -> numpy.ndarray:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break
        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )
        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(
                self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch
        return decrypted
def main() -> None:
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []
    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)
    hc = HillCipher(numpy.array(hill_matrix))
    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
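# Worked example for HillCipher above with the 2x2 key [[2, 5], [1, 6]]
# (determinant 7, coprime with 36, so check_determinant passes). The expected
# outputs are shown as comments and are worth re-verifying before relying on them:
# hc = HillCipher(numpy.array([[2, 5], [1, 6]]))
# hc.encrypt("testing hill cipher")  # -> 'WHXYJOLM9C6XT085LL'
# hc.decrypt("WHXYJOLM9C6XT085LL")   # -> 'TESTINGHILLCIPHERR' (padding char repeated)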
| 29
| 0
|