| code (string, lengths 81-54k) | code_codestyle (int64, 0-721) | style_context (string, lengths 91-41.9k) | style_context_codestyle (int64, 0-699) | label (int64, 0-1) |
|---|---|---|---|---|
"""simple docstring"""
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
a : Tuple = logging.get_logger(__name__)
def lowercase__(A , A ) ->Union[str, Any]:
"""simple docstring"""
lowercase__ : Dict= set()
lowercase__ : Optional[int]= []
def parse_line(A ):
for line in fp:
if isinstance(A , A ):
lowercase__ : Dict= line.decode("UTF-8" )
if "warnings summary (final)" in line:
continue
# This means we are outside the body of a warning
elif not line.startswith(" " ):
# process a single warning and move it to `selected_warnings`.
if len(A ) > 0:
lowercase__ : Tuple= "\n".join(A )
# Only keep the warnings specified in `targets`
if any(f''': {x}: ''' in warning for x in targets ):
selected_warnings.add(A )
buffer.clear()
continue
else:
lowercase__ : Union[str, Any]= line.strip()
buffer.append(A )
if from_gh:
for filename in os.listdir(A ):
lowercase__ : Optional[Any]= os.path.join(A , A )
if not os.path.isdir(A ):
# read the file
if filename != "warnings.txt":
continue
with open(A ) as fp:
parse_line(A )
else:
try:
with zipfile.ZipFile(A ) as z:
for filename in z.namelist():
if not os.path.isdir(A ):
# read the file
if filename != "warnings.txt":
continue
with z.open(A ) as fp:
parse_line(A )
except Exception:
logger.warning(
f'''{artifact_path} is either an invalid zip file or something else went wrong. This file is skipped.''' )
return selected_warnings
def lowercase__(A , A ) ->List[str]:
"""simple docstring"""
lowercase__ : Tuple= set()
lowercase__ : List[Any]= [os.path.join(A , A ) for p in os.listdir(A ) if (p.endswith(".zip" ) or from_gh)]
for p in paths:
selected_warnings.update(extract_warnings_from_single_artifact(A , A ) )
return selected_warnings
if __name__ == "__main__":
def lowercase__(A ) ->Dict:
"""simple docstring"""
return values.split("," )
a : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
parser.add_argument(
"""--output_dir""",
type=str,
required=True,
help="""Where to store the downloaded artifacts and other result files.""",
)
parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
# optional parameters
parser.add_argument(
"""--targets""",
default="""DeprecationWarning,UserWarning,FutureWarning""",
type=list_str,
help="""Comma-separated list of target warning(s) which we want to extract.""",
)
parser.add_argument(
"""--from_gh""",
action="""store_true""",
help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""",
)
a : Any = parser.parse_args()
a : Optional[Any] = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
a : Any = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print("""=""" * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
a : str = extract_warnings(args.output_dir, args.targets)
a : List[str] = sorted(selected_warnings)
with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 85 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = 42
__lowerCamelCase = 42
def __init__( self , snake_case__ , snake_case__ ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=snake_case__ , scheduler=snake_case__ )
@torch.no_grad()
def __call__( self , snake_case__ = 1 , snake_case__ = 2000 , snake_case__ = None , snake_case__ = "pil" , snake_case__ = True , **snake_case__ , ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.unet.config.sample_size
lowercase__ : Dict= (batch_size, 3, img_size, img_size)
lowercase__ : List[Any]= self.unet
lowercase__ : Tuple= randn_tensor(snake_case__ , generator=snake_case__ ) * self.scheduler.init_noise_sigma
lowercase__ : Tuple= sample.to(self.device )
self.scheduler.set_timesteps(snake_case__ )
self.scheduler.set_sigmas(snake_case__ )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
lowercase__ : Optional[Any]= self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
lowercase__ : List[Any]= self.unet(snake_case__ , snake_case__ ).sample
lowercase__ : List[Any]= self.scheduler.step_correct(snake_case__ , snake_case__ , generator=snake_case__ ).prev_sample
# prediction step
lowercase__ : List[str]= model(snake_case__ , snake_case__ ).sample
lowercase__ : Tuple= self.scheduler.step_pred(snake_case__ , snake_case__ , snake_case__ , generator=snake_case__ )
lowercase__, lowercase__ : Tuple= output.prev_sample, output.prev_sample_mean
lowercase__ : List[str]= sample_mean.clamp(0 , 1 )
lowercase__ : Union[str, Any]= sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowercase__ : str= self.numpy_to_pil(snake_case__ )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=snake_case__ )
| 85 | 1 |
"""simple docstring"""
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
a : List[str] = open # noqa: we just need to have a builtin inside this module to test it properly
| 85 |
"""simple docstring"""
def lowercase__(A ) ->list[int]:
"""simple docstring"""
lowercase__ : List[str]= len(A )
for i in range(A ):
for j in range(i + 1 , A ):
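# Compare the element at position i with every later element and swap the pair when it is out of order.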
if numbers[j] < numbers[i]:
lowercase__, lowercase__ : List[str]= numbers[j], numbers[i]
return numbers
if __name__ == "__main__":
a : Dict = input("""Enter numbers separated by a comma:\n""").strip()
a : List[str] = [int(item) for item in user_input.split(""",""")]
print(exchange_sort(unsorted))
| 85 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __UpperCAmelCase( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = ["torch", "torchsde"]
def __init__( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
requires_backends(self , ["torch", "torchsde"] )
@classmethod
def UpperCAmelCase_ ( cls , *snake_case__ , **snake_case__ ):
'''simple docstring'''
requires_backends(cls , ["torch", "torchsde"] )
@classmethod
def UpperCAmelCase_ ( cls , *snake_case__ , **snake_case__ ):
'''simple docstring'''
requires_backends(cls , ["torch", "torchsde"] )
| 85 |
"""simple docstring"""
import math
from collections.abc import Iterator
from itertools import takewhile
def lowercase__(A ) ->bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, and all multiples of 3 are not prime
return False
# All primes greater than 3 are of the form 6k +/- 1, so only divisors of that form need to be checked
for i in range(5 , int(math.sqrt(A ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def lowercase__() ->Iterator[int]:
"""simple docstring"""
lowercase__ : Union[str, Any]= 2
while True:
if is_prime(A ):
yield num
num += 1
def lowercase__(A = 2_000_000 ) ->int:
"""simple docstring"""
return sum(takewhile(lambda A : x < n , prime_generator() ) )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 85 | 1 |
"""simple docstring"""
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def lowercase__(A , A ) ->List[Any]:
"""simple docstring"""
lowercase__ : str= []
for part_id in partition_order:
lowercase__ : int= df.where(f'''SPARK_PARTITION_ID() = {part_id}''' ).collect()
for row_idx, row in enumerate(A ):
expected_row_ids_and_row_dicts.append((f'''{part_id}_{row_idx}''', row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def lowercase__() ->str:
"""simple docstring"""
lowercase__ : Optional[Any]= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : Tuple= spark.range(100 ).repartition(1 )
lowercase__ : Dict= Spark(A )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def lowercase__() ->Tuple:
"""simple docstring"""
lowercase__ : Union[str, Any]= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : Dict= spark.range(10 ).repartition(2 )
lowercase__ : Optional[Any]= [1, 0]
lowercase__ : List[str]= _generate_iterable_examples(A , A ) # Reverse the partitions.
lowercase__ : int= _get_expected_row_ids_and_row_dicts_for_partition_order(A , A )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
lowercase__, lowercase__ : Any= expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowercase__() ->int:
"""simple docstring"""
lowercase__ : int= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : Dict= spark.range(10 ).repartition(1 )
lowercase__ : str= SparkExamplesIterable(A )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(A ):
assert row_id == f'''0_{i}'''
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def lowercase__() ->str:
"""simple docstring"""
lowercase__ : List[str]= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : int= spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch("numpy.random.Generator" ) as generator_mock:
lowercase__ : Optional[Any]= lambda A : x.reverse()
lowercase__ : Tuple= _get_expected_row_ids_and_row_dicts_for_partition_order(A , [2, 1, 0] )
lowercase__ : List[str]= SparkExamplesIterable(A ).shuffle_data_sources(A )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(A ):
lowercase__, lowercase__ : str= expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowercase__() ->Any:
"""simple docstring"""
lowercase__ : Dict= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : Union[str, Any]= spark.range(20 ).repartition(4 )
# Partitions 0 and 2
lowercase__ : Optional[int]= SparkExamplesIterable(A ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
lowercase__ : Union[str, Any]= _get_expected_row_ids_and_row_dicts_for_partition_order(A , [0, 2] )
for i, (row_id, row_dict) in enumerate(A ):
lowercase__, lowercase__ : Tuple= expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
lowercase__ : Tuple= SparkExamplesIterable(A ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
lowercase__ : List[Any]= _get_expected_row_ids_and_row_dicts_for_partition_order(A , [1, 3] )
for i, (row_id, row_dict) in enumerate(A ):
lowercase__, lowercase__ : Dict= expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowercase__() ->Tuple:
"""simple docstring"""
lowercase__ : Any= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : Tuple= spark.range(100 ).repartition(1 )
lowercase__ : Optional[int]= Spark(A )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
| 85 |
"""simple docstring"""
def lowercase__(A ) ->bool:
"""simple docstring"""
lowercase__ : Tuple= (1 + 24 * n) ** 0.5
return ((1 + root) / 6) % 1 == 0
def lowercase__(A = 5_000 ) ->int:
"""simple docstring"""
lowercase__ : str= [(i * (3 * i - 1)) // 2 for i in range(1 , A )]
for i, pentagonal_i in enumerate(A ):
for j in range(A , len(A ) ):
lowercase__ : List[Any]= pentagonal_nums[j]
lowercase__ : int= pentagonal_i + pentagonal_j
lowercase__ : Optional[int]= pentagonal_j - pentagonal_i
if is_pentagonal(A ) and is_pentagonal(A ):
return b
return -1
if __name__ == "__main__":
print(F"""{solution() = }""")
| 85 | 1 |
"""simple docstring"""
from collections.abc import Sequence
from queue import Queue
class __UpperCAmelCase:
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__=None , snake_case__=None ):
'''simple docstring'''
lowercase__ : Tuple= start
lowercase__ : Optional[int]= end
lowercase__ : Union[str, Any]= val
lowercase__ : str= (start + end) // 2
lowercase__ : Any= left
lowercase__ : int= right
def __repr__( self ):
'''simple docstring'''
return F'''SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})'''
class __UpperCAmelCase:
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__ ):
'''simple docstring'''
lowercase__ : Optional[int]= collection
lowercase__ : List[Any]= function
if self.collection:
lowercase__ : List[Any]= self._build_tree(0 , len(snake_case__ ) - 1 )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ ):
'''simple docstring'''
self._update_tree(self.root , snake_case__ , snake_case__ )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ ):
'''simple docstring'''
return self._query_range(self.root , snake_case__ , snake_case__ )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ ):
'''simple docstring'''
if start == end:
return SegmentTreeNode(snake_case__ , snake_case__ , self.collection[start] )
lowercase__ : Dict= (start + end) // 2
lowercase__ : Any= self._build_tree(snake_case__ , snake_case__ )
lowercase__ : Any= self._build_tree(mid + 1 , snake_case__ )
return SegmentTreeNode(snake_case__ , snake_case__ , self.fn(left.val , right.val ) , snake_case__ , snake_case__ )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
if node.start == i and node.end == i:
lowercase__ : Union[str, Any]= val
return
if i <= node.mid:
self._update_tree(node.left , snake_case__ , snake_case__ )
else:
self._update_tree(node.right , snake_case__ , snake_case__ )
lowercase__ : Any= self.fn(node.left.val , node.right.val )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
if node.start == i and node.end == j:
return node.val
if i <= node.mid:
if j <= node.mid:
# range in left child tree
return self._query_range(node.left , snake_case__ , snake_case__ )
else:
# range in left child tree and right child tree
return self.fn(
self._query_range(node.left , snake_case__ , node.mid ) , self._query_range(node.right , node.mid + 1 , snake_case__ ) , )
else:
# range in right child tree
return self._query_range(node.right , snake_case__ , snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
if self.root is not None:
lowercase__ : List[str]= Queue()
queue.put(self.root )
while not queue.empty():
lowercase__ : str= queue.get()
yield node
if node.left is not None:
queue.put(node.left )
if node.right is not None:
queue.put(node.right )
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print("""*""" * 50)
a : Union[str, Any] = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
| 85 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : List[str] = logging.get_logger(__name__)
a : Union[str, Any] = {
"""google/pix2struct-textcaps-base""": (
"""https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"""
),
}
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = "pix2struct_text_model"
__lowerCamelCase = ["past_key_values"]
__lowerCamelCase = {
"hidden_size": "hidden_size",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , snake_case__=50244 , snake_case__=768 , snake_case__=64 , snake_case__=2048 , snake_case__=12 , snake_case__=12 , snake_case__=32 , snake_case__=128 , snake_case__=0.1 , snake_case__=1e-6 , snake_case__=1.0 , snake_case__="gelu_new" , snake_case__=0 , snake_case__=False , snake_case__=0 , snake_case__=1 , snake_case__=False , snake_case__=True , **snake_case__ , ):
'''simple docstring'''
lowercase__ : int= vocab_size
lowercase__ : Optional[Any]= hidden_size
lowercase__ : Tuple= d_kv
lowercase__ : Optional[int]= d_ff
lowercase__ : Any= num_layers
lowercase__ : Dict= num_heads
lowercase__ : List[Any]= relative_attention_num_buckets
lowercase__ : Optional[Any]= relative_attention_max_distance
lowercase__ : Dict= dropout_rate
lowercase__ : Tuple= layer_norm_epsilon
lowercase__ : str= initializer_factor
lowercase__ : Any= use_cache
lowercase__ : Optional[int]= eos_token_id
lowercase__ : str= decoder_start_token_id
# for backwards compatibility
lowercase__ : Optional[Any]= dense_act_fn
super().__init__(
pad_token_id=snake_case__ , eos_token_id=snake_case__ , decoder_start_token_id=snake_case__ , tie_word_embeddings=snake_case__ , is_decoder=snake_case__ , **snake_case__ , )
@classmethod
def UpperCAmelCase_ ( cls , snake_case__ , **snake_case__ ):
'''simple docstring'''
cls._set_token_in_kwargs(snake_case__ )
lowercase__, lowercase__ : str= cls.get_config_dict(snake_case__ , **snake_case__ )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get("model_type" ) == "pix2struct":
lowercase__ : str= config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(snake_case__ , **snake_case__ )
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = "pix2struct_vision_model"
def __init__( self , snake_case__=768 , snake_case__=768 , snake_case__=2048 , snake_case__=64 , snake_case__=12 , snake_case__=12 , snake_case__="gelu_new" , snake_case__=1e-6 , snake_case__=0.0 , snake_case__=0.0 , snake_case__=1e-10 , snake_case__=1.0 , snake_case__=4096 , snake_case__=32 , snake_case__=128 , **snake_case__ , ):
'''simple docstring'''
super().__init__(**snake_case__ )
lowercase__ : Tuple= hidden_size
lowercase__ : Tuple= patch_embed_hidden_size
lowercase__ : Optional[Any]= d_ff
lowercase__ : Dict= dropout_rate
lowercase__ : Any= num_hidden_layers
lowercase__ : Optional[int]= num_attention_heads
lowercase__ : Dict= initializer_range
lowercase__ : Tuple= initializer_factor
lowercase__ : Tuple= attention_dropout
lowercase__ : Optional[Any]= layer_norm_eps
lowercase__ : List[Any]= dense_act_fn
lowercase__ : str= seq_len
lowercase__ : List[str]= relative_attention_num_buckets
lowercase__ : Union[str, Any]= relative_attention_max_distance
lowercase__ : Dict= d_kv
@classmethod
def UpperCAmelCase_ ( cls , snake_case__ , **snake_case__ ):
'''simple docstring'''
cls._set_token_in_kwargs(snake_case__ )
lowercase__, lowercase__ : int= cls.get_config_dict(snake_case__ , **snake_case__ )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get("model_type" ) == "pix2struct":
lowercase__ : Union[str, Any]= config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(snake_case__ , **snake_case__ )
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = "pix2struct"
__lowerCamelCase = True
def __init__( self , snake_case__=None , snake_case__=None , snake_case__=1.0 , snake_case__=0.02 , snake_case__=False , snake_case__=False , snake_case__=True , **snake_case__ , ):
'''simple docstring'''
super().__init__(tie_word_embeddings=snake_case__ , is_encoder_decoder=snake_case__ , **snake_case__ )
if text_config is None:
lowercase__ : List[Any]= {}
logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values." )
if vision_config is None:
lowercase__ : str= {}
logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values." )
lowercase__ : str= PixaStructTextConfig(**snake_case__ )
lowercase__ : Dict= PixaStructVisionConfig(**snake_case__ )
lowercase__ : int= self.text_config.decoder_start_token_id
lowercase__ : List[Any]= self.text_config.pad_token_id
lowercase__ : Any= self.text_config.eos_token_id
lowercase__ : Any= initializer_factor
lowercase__ : int= initializer_range
lowercase__ : List[str]= self.initializer_range
lowercase__ : List[str]= self.initializer_range
lowercase__ : Dict= is_vqa
@classmethod
def UpperCAmelCase_ ( cls , snake_case__ , snake_case__ , **snake_case__ ):
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Union[str, Any]= copy.deepcopy(self.__dict__ )
lowercase__ : str= self.text_config.to_dict()
lowercase__ : str= self.vision_config.to_dict()
lowercase__ : List[str]= self.__class__.model_type
return output
| 85 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a : int = logging.get_logger(__name__)
a : str = {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""",
"""google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""",
"""google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = "big_bird"
def __init__( self , snake_case__=50358 , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__="gelu_new" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=4096 , snake_case__=2 , snake_case__=0.02 , snake_case__=1e-12 , snake_case__=True , snake_case__=0 , snake_case__=1 , snake_case__=2 , snake_case__=66 , snake_case__="block_sparse" , snake_case__=True , snake_case__=False , snake_case__=64 , snake_case__=3 , snake_case__=None , **snake_case__ , ):
'''simple docstring'''
super().__init__(
pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , sep_token_id=snake_case__ , **snake_case__ , )
lowercase__ : Dict= vocab_size
lowercase__ : Optional[int]= max_position_embeddings
lowercase__ : List[Any]= hidden_size
lowercase__ : List[str]= num_hidden_layers
lowercase__ : List[str]= num_attention_heads
lowercase__ : Optional[int]= intermediate_size
lowercase__ : Optional[int]= hidden_act
lowercase__ : Tuple= hidden_dropout_prob
lowercase__ : int= attention_probs_dropout_prob
lowercase__ : int= initializer_range
lowercase__ : List[Any]= type_vocab_size
lowercase__ : Union[str, Any]= layer_norm_eps
lowercase__ : Optional[Any]= use_cache
lowercase__ : Union[str, Any]= rescale_embeddings
lowercase__ : Union[str, Any]= attention_type
lowercase__ : Any= use_bias
lowercase__ : List[Any]= block_size
lowercase__ : Optional[Any]= num_random_blocks
lowercase__ : Optional[int]= classifier_dropout
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
@property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
if self.task == "multiple-choice":
lowercase__ : List[Any]= {0: "batch", 1: "choice", 2: "sequence"}
else:
lowercase__ : Tuple= {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 85 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class __UpperCAmelCase( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : str= TFAutoModelForSeqaSeqLM.from_pretrained("google/mt5-small" )
lowercase__ : str= AutoTokenizer.from_pretrained("google/mt5-small" )
lowercase__ : Tuple= tokenizer("Hello there" , return_tensors="tf" ).input_ids
lowercase__ : Optional[Any]= tokenizer("Hi I am" , return_tensors="tf" ).input_ids
lowercase__ : Optional[Any]= model(snake_case__ , labels=snake_case__ ).loss
lowercase__ : int= -tf.math.reduce_mean(snake_case__ ).numpy()
lowercase__ : int= -21.22_81_68
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2e-4 )
| 85 | 1 |
"""simple docstring"""
from __future__ import annotations
import pandas as pd
def lowercase__(A , A , A ) ->list[int]:
"""simple docstring"""
lowercase__ : Dict= [0] * no_of_processes
lowercase__ : Optional[Any]= [0] * no_of_processes
# Copy the burst time into remaining_time[]
for i in range(A ):
lowercase__ : Dict= burst_time[i]
lowercase__ : Optional[int]= 0
lowercase__ : List[Any]= 0
lowercase__ : Any= 999_999_999
lowercase__ : Optional[Any]= 0
lowercase__ : Optional[Any]= False
# Process until all processes are completed
while complete != no_of_processes:
for j in range(A ):
if arrival_time[j] <= increment_time and remaining_time[j] > 0:
if remaining_time[j] < minm:
lowercase__ : str= remaining_time[j]
lowercase__ : Dict= j
lowercase__ : str= True
if not check:
increment_time += 1
continue
remaining_time[short] -= 1
lowercase__ : Dict= remaining_time[short]
if minm == 0:
lowercase__ : Dict= 999_999_999
if remaining_time[short] == 0:
complete += 1
lowercase__ : Optional[int]= False
# Find finish time of current process
lowercase__ : Optional[int]= increment_time + 1
# Calculate waiting time
lowercase__ : Any= finish_time - arrival_time[short]
lowercase__ : Optional[int]= finar - burst_time[short]
if waiting_time[short] < 0:
lowercase__ : str= 0
# Increment time
increment_time += 1
return waiting_time
def lowercase__(A , A , A ) ->list[int]:
"""simple docstring"""
lowercase__ : Optional[Any]= [0] * no_of_processes
for i in range(A ):
lowercase__ : Optional[int]= burst_time[i] + waiting_time[i]
return turn_around_time
def lowercase__(A , A , A ) ->None:
"""simple docstring"""
lowercase__ : str= 0
lowercase__ : str= 0
for i in range(A ):
lowercase__ : Any= total_waiting_time + waiting_time[i]
lowercase__ : List[Any]= total_turn_around_time + turn_around_time[i]
print(f'''Average waiting time = {total_waiting_time / no_of_processes:.5f}''' )
print("Average turn around time =" , total_turn_around_time / no_of_processes )
if __name__ == "__main__":
print("""Enter how many process you want to analyze""")
a : Tuple = int(input())
a : Union[str, Any] = [0] * no_of_processes
a : int = [0] * no_of_processes
a : Union[str, Any] = list(range(1, no_of_processes + 1))
for i in range(no_of_processes):
print("""Enter the arrival time and burst time for process:--""" + str(i + 1))
a , a : List[str] = map(int, input().split())
a : Tuple = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
a : Dict = burst_time
a : Union[str, Any] = no_of_processes
a : Optional[int] = waiting_time
a : Optional[int] = calculate_turnaroundtime(bt, n, wt)
calculate_average_times(waiting_time, turn_around_time, no_of_processes)
a : Dict = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
"""Process""",
"""BurstTime""",
"""ArrivalTime""",
"""WaitingTime""",
"""TurnAroundTime""",
],
)
# Printing the dataFrame
pd.set_option("""display.max_rows""", fcfs.shape[0] + 1)
print(fcfs)
| 85 |
"""simple docstring"""
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = ["image_processor", "tokenizer"]
__lowerCamelCase = "BridgeTowerImageProcessor"
__lowerCamelCase = ("RobertaTokenizer", "RobertaTokenizerFast")
def __init__( self , snake_case__ , snake_case__ ):
'''simple docstring'''
super().__init__(snake_case__ , snake_case__ )
def __call__( self , snake_case__ , snake_case__ = None , snake_case__ = True , snake_case__ = False , snake_case__ = None , snake_case__ = None , snake_case__ = 0 , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = False , snake_case__ = False , snake_case__ = False , snake_case__ = False , snake_case__ = True , snake_case__ = None , **snake_case__ , ):
'''simple docstring'''
lowercase__ : Optional[int]= self.tokenizer(
text=snake_case__ , add_special_tokens=snake_case__ , padding=snake_case__ , truncation=snake_case__ , max_length=snake_case__ , stride=snake_case__ , pad_to_multiple_of=snake_case__ , return_token_type_ids=snake_case__ , return_attention_mask=snake_case__ , return_overflowing_tokens=snake_case__ , return_special_tokens_mask=snake_case__ , return_offsets_mapping=snake_case__ , return_length=snake_case__ , verbose=snake_case__ , return_tensors=snake_case__ , **snake_case__ , )
# add pixel_values + pixel_mask
lowercase__ : Optional[int]= self.image_processor(
snake_case__ , return_tensors=snake_case__ , do_normalize=snake_case__ , do_center_crop=snake_case__ , **snake_case__ )
encoding.update(snake_case__ )
return encoding
def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ )
def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
return self.tokenizer.decode(*snake_case__ , **snake_case__ )
@property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.tokenizer.model_input_names
lowercase__ : List[Any]= self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 85 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : Any = logging.get_logger(__name__)
a : Tuple = {
"""abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json""",
}
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = "gpt_neox_japanese"
def __init__( self , snake_case__=32000 , snake_case__=2560 , snake_case__=32 , snake_case__=32 , snake_case__=4 , snake_case__="gelu" , snake_case__=1.00 , snake_case__=10000 , snake_case__=2048 , snake_case__=0.02 , snake_case__=1e-5 , snake_case__=True , snake_case__=31996 , snake_case__=31999 , snake_case__=0.1 , snake_case__=0.0 , **snake_case__ , ):
'''simple docstring'''
super().__init__(bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
lowercase__ : str= vocab_size
lowercase__ : Optional[Any]= max_position_embeddings
lowercase__ : Optional[Any]= hidden_size
lowercase__ : Tuple= num_hidden_layers
lowercase__ : List[Any]= num_attention_heads
lowercase__ : Tuple= intermediate_multiple_size
lowercase__ : int= hidden_act
lowercase__ : Dict= rotary_pct
lowercase__ : Optional[Any]= rotary_emb_base
lowercase__ : str= initializer_range
lowercase__ : str= layer_norm_eps
lowercase__ : Optional[int]= use_cache
lowercase__ : Tuple= attention_dropout
lowercase__ : List[Any]= hidden_dropout
| 85 |
"""simple docstring"""
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= tempfile.mkdtemp()
lowercase__ : Optional[Any]= 8
# DPR tok
lowercase__ : Tuple= [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
lowercase__ : Any= os.path.join(self.tmpdirname , "dpr_tokenizer" )
os.makedirs(snake_case__ , exist_ok=snake_case__ )
lowercase__ : Any= os.path.join(snake_case__ , DPR_VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
# BART tok
lowercase__ : List[Any]= [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
lowercase__ : Tuple= dict(zip(snake_case__ , range(len(snake_case__ ) ) ) )
lowercase__ : Any= ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
lowercase__ : Tuple= {"unk_token": "<unk>"}
lowercase__ : int= os.path.join(self.tmpdirname , "bart_tokenizer" )
os.makedirs(snake_case__ , exist_ok=snake_case__ )
lowercase__ : List[str]= os.path.join(snake_case__ , BART_VOCAB_FILES_NAMES["vocab_file"] )
lowercase__ : str= os.path.join(snake_case__ , BART_VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(snake_case__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(snake_case__ ) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , "bart_tokenizer" ) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= Dataset.from_dict(
{
"id": ["0", "1"],
"text": ["foo", "bar"],
"title": ["Foo", "Bar"],
"embeddings": [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Union[str, Any]= self.get_dummy_dataset()
lowercase__ : Optional[Any]= RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset:
lowercase__ : Tuple= dataset
lowercase__ : Optional[int]= RagRetriever(
snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
lowercase__ : Dict= self.get_dummy_dataset()
lowercase__ : Tuple= RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="custom" , )
if from_disk:
lowercase__ : Tuple= os.path.join(self.tmpdirname , "dataset" )
lowercase__ : Optional[Any]= os.path.join(self.tmpdirname , "index.faiss" )
dataset.get_index("embeddings" ).save(os.path.join(self.tmpdirname , "index.faiss" ) )
dataset.drop_index("embeddings" )
dataset.save_to_disk(os.path.join(self.tmpdirname , "dataset" ) )
del dataset
lowercase__ : List[Any]= RagRetriever(
snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
lowercase__ : Optional[int]= RagRetriever(
snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , snake_case__ ) , )
return retriever
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[str]= Dataset.from_dict(
{
"id": ["0", "1"],
"text": ["foo", "bar"],
"title": ["Foo", "Bar"],
"embeddings": [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT )
lowercase__ : Optional[int]= os.path.join(self.tmpdirname , "hf_bert_base.hnswSQ8_correct_phi_128.c_index" )
dataset.save_faiss_index("embeddings" , index_file_name + ".index.dpr" )
pickle.dump(dataset["id"] , open(index_file_name + ".index_meta.dpr" , "wb" ) )
lowercase__ : int= os.path.join(self.tmpdirname , "psgs_w100.tsv.pkl" )
lowercase__ : str= {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
pickle.dump(snake_case__ , open(snake_case__ , "wb" ) )
lowercase__ : List[Any]= RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="legacy" , index_path=self.tmpdirname , )
lowercase__ : Optional[Any]= RagRetriever(
snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= 1
lowercase__ : Optional[Any]= self.get_dummy_canonical_hf_index_retriever()
lowercase__ : Union[str, Any]= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__, lowercase__, lowercase__ : Optional[int]= retriever.retrieve(snake_case__ , n_docs=snake_case__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(snake_case__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , snake_case__ )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset:
lowercase__ : Tuple= self.get_dummy_dataset()
retriever.save_pretrained(snake_case__ )
lowercase__ : int= RagRetriever.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
lowercase__ : Any= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : Tuple= retriever.retrieve(snake_case__ , n_docs=1 )
self.assertTrue(out is not None )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[Any]= 1
lowercase__ : Any= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ )
lowercase__ : Union[str, Any]= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__, lowercase__, lowercase__ : Any= retriever.retrieve(snake_case__ , n_docs=snake_case__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(snake_case__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , snake_case__ )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(snake_case__ )
lowercase__ : int= RagRetriever.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
lowercase__ : Tuple= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : str= retriever.retrieve(snake_case__ , n_docs=1 )
self.assertTrue(out is not None )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Any= 1
lowercase__ : str= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ )
lowercase__ : List[str]= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__, lowercase__, lowercase__ : Optional[int]= retriever.retrieve(snake_case__ , n_docs=snake_case__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(snake_case__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , snake_case__ )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Tuple= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(snake_case__ )
lowercase__ : Optional[Any]= RagRetriever.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
lowercase__ : int= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : Union[str, Any]= retriever.retrieve(snake_case__ , n_docs=1 )
self.assertTrue(out is not None )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= 1
lowercase__ : int= self.get_dummy_legacy_index_retriever()
lowercase__ : Optional[Any]= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__, lowercase__, lowercase__ : Optional[Any]= retriever.retrieve(snake_case__ , n_docs=snake_case__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(snake_case__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["text", "title"] )
self.assertEqual(len(doc_dicts[0]["text"] ) , snake_case__ )
self.assertEqual(doc_dicts[0]["text"][0] , "bar" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["text"][0] , "foo" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[int]= self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(snake_case__ )
lowercase__ : List[Any]= RagRetriever.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
lowercase__ : str= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : Tuple= retriever.retrieve(snake_case__ , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def UpperCAmelCase_ ( self ):
'''simple docstring'''
import torch
lowercase__ : str= 1
lowercase__ : Union[str, Any]= self.get_dummy_canonical_hf_index_retriever()
lowercase__ : str= [[5, 7], [10, 11]]
lowercase__ : List[str]= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : Dict= retriever(snake_case__ , snake_case__ , prefix=retriever.config.generator.prefix , n_docs=snake_case__ )
lowercase__, lowercase__, lowercase__ : Optional[int]= (
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(snake_case__ , snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
self.assertIsInstance(snake_case__ , np.ndarray )
lowercase__ : Any= retriever(
snake_case__ , snake_case__ , prefix=retriever.config.generator.prefix , n_docs=snake_case__ , return_tensors="pt" , )
lowercase__, lowercase__, lowercase__, lowercase__ : Tuple= ( # noqa: F841
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
out["doc_ids"],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(snake_case__ , torch.Tensor )
self.assertIsInstance(snake_case__ , torch.Tensor )
self.assertIsInstance(snake_case__ , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[str]= self.get_dpr_ctx_encoder_tokenizer()
lowercase__ : Dict= 1
lowercase__ : Any= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ )
retriever.set_ctx_encoder_tokenizer(snake_case__ )
lowercase__ : List[str]= [[5, 7], [10, 11]]
lowercase__ : Any= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : List[Any]= retriever(snake_case__ , snake_case__ , prefix=retriever.config.generator.prefix , n_docs=snake_case__ )
self.assertEqual(
len(snake_case__ ) , 6 ) # check that the retriever output consists of 6 attributes, including the tokenized docs
self.assertEqual(
all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask") ) , snake_case__ ) # check for doc-token-related keys in the dictionary.
| 85 | 1 |
"""simple docstring"""
from statistics import mean
import numpy as np
def lowercase__(A , A , A , A ) ->list:
"""simple docstring"""
lowercase__ : str= 0
# Number of processes finished
lowercase__ : Tuple= 0
# Tracks which processes have finished.
# A value of 0 means the process has not finished yet; 1 means it has finished.
lowercase__ : int= [0] * no_of_process
# List to include calculation results
lowercase__ : Union[str, Any]= [0] * no_of_process
# Sort by arrival time.
lowercase__ : Union[str, Any]= [burst_time[i] for i in np.argsort(A )]
lowercase__ : Optional[Any]= [process_name[i] for i in np.argsort(A )]
arrival_time.sort()
while no_of_process > finished_process_count:
lowercase__ : List[Any]= 0
while finished_process[i] == 1:
i += 1
if current_time < arrival_time[i]:
lowercase__ : List[Any]= arrival_time[i]
lowercase__ : Optional[Any]= 0
# Index showing the location of the process being performed
lowercase__ : Optional[Any]= 0
# Saves the current response ratio.
lowercase__ : List[Any]= 0
for i in range(0 , A ):
if finished_process[i] == 0 and arrival_time[i] <= current_time:
lowercase__ : Dict= (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
i
]
if response_ratio < temp:
lowercase__ : Any= temp
lowercase__ : Optional[Any]= i
# Calculate the turn around time
lowercase__ : Optional[Any]= current_time + burst_time[loc] - arrival_time[loc]
current_time += burst_time[loc]
# Indicates that the process has been performed.
lowercase__ : Any= 1
# Increase finished_process_count by 1
finished_process_count += 1
return turn_around_time
def lowercase__(A , A , A , A ) ->list:
"""simple docstring"""
lowercase__ : Tuple= [0] * no_of_process
for i in range(0 , A ):
lowercase__ : Any= turn_around_time[i] - burst_time[i]
return waiting_time
if __name__ == "__main__":
a : Optional[int] = 5
a : Optional[int] = ["""A""", """B""", """C""", """D""", """E"""]
a : Union[str, Any] = [1, 2, 3, 4, 5]
a : Tuple = [1, 2, 3, 4, 5]
a : Union[str, Any] = calculate_turn_around_time(
process_name, arrival_time, burst_time, no_of_process
)
a : int = calculate_waiting_time(
process_name, turn_around_time, burst_time, no_of_process
)
print("""Process name \tArrival time \tBurst time \tTurn around time \tWaiting time""")
for i in range(0, no_of_process):
print(
F"""{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"""
F"""{turn_around_time[i]}\t\t\t{waiting_time[i]}"""
)
print(F"""average waiting time : {mean(waiting_time):.5f}""")
print(F"""average turn around time : {mean(turn_around_time):.5f}""")
| 85 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = ["image_processor", "tokenizer"]
__lowerCamelCase = "AutoImageProcessor"
__lowerCamelCase = "AutoTokenizer"
def __init__( self , snake_case__ , snake_case__ ):
'''simple docstring'''
super().__init__(snake_case__ , snake_case__ )
lowercase__ : List[Any]= self.image_processor
def __call__( self , snake_case__=None , snake_case__=None , snake_case__=None , **snake_case__ ):
'''simple docstring'''
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
lowercase__ : Tuple= self.tokenizer(snake_case__ , return_tensors=snake_case__ , **snake_case__ )
if images is not None:
lowercase__ : str= self.image_processor(snake_case__ , return_tensors=snake_case__ , **snake_case__ )
if text is not None and images is not None:
lowercase__ : Any= image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**snake_case__ ) , tensor_type=snake_case__ )
def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ )
def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
return self.tokenizer.decode(*snake_case__ , **snake_case__ )
@property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return ["input_ids", "attention_mask", "pixel_values"]
| 85 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a : List[Any] = {
"""configuration_albert""": ["""ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """AlbertConfig""", """AlbertOnnxConfig"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : str = ["""AlbertTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[str] = ["""AlbertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Optional[Any] = [
"""ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AlbertForMaskedLM""",
"""AlbertForMultipleChoice""",
"""AlbertForPreTraining""",
"""AlbertForQuestionAnswering""",
"""AlbertForSequenceClassification""",
"""AlbertForTokenClassification""",
"""AlbertModel""",
"""AlbertPreTrainedModel""",
"""load_tf_weights_in_albert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : int = [
"""TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFAlbertForMaskedLM""",
"""TFAlbertForMultipleChoice""",
"""TFAlbertForPreTraining""",
"""TFAlbertForQuestionAnswering""",
"""TFAlbertForSequenceClassification""",
"""TFAlbertForTokenClassification""",
"""TFAlbertMainLayer""",
"""TFAlbertModel""",
"""TFAlbertPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Any = [
"""FlaxAlbertForMaskedLM""",
"""FlaxAlbertForMultipleChoice""",
"""FlaxAlbertForPreTraining""",
"""FlaxAlbertForQuestionAnswering""",
"""FlaxAlbertForSequenceClassification""",
"""FlaxAlbertForTokenClassification""",
"""FlaxAlbertModel""",
"""FlaxAlbertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
a : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 85 |
"""simple docstring"""
a : List[Any] = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"""
def lowercase__(A ) ->bytes:
"""simple docstring"""
if not isinstance(A , A ):
lowercase__ : Union[str, Any]= f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(A )
lowercase__ : str= "".join(bin(A )[2:].zfill(8 ) for byte in data )
lowercase__ : Tuple= len(A ) % 6 != 0
if padding_needed:
# The padding that will be added later
lowercase__ : Union[str, Any]= b"=" * ((6 - len(A ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(A ) % 6)
else:
lowercase__ : str= b""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(A ) , 6 ) ).encode()
+ padding
)
def lowercase__(A ) ->bytes:
"""simple docstring"""
if not isinstance(A , A ) and not isinstance(A , A ):
lowercase__ : str= (
"argument should be a bytes-like object or ASCII string, "
f'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(A )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(A , A ):
try:
lowercase__ : Optional[Any]= encoded_data.decode("utf-8" )
except UnicodeDecodeError:
raise ValueError("base64 encoded data should only contain ASCII characters" )
lowercase__ : List[Any]= encoded_data.count("=" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(A ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
lowercase__ : str= encoded_data[:-padding]
lowercase__ : Tuple= "".join(
bin(B64_CHARSET.index(A ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
lowercase__ : Tuple= "".join(
bin(B64_CHARSET.index(A ) )[2:].zfill(6 ) for char in encoded_data )
lowercase__ : Any= [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(A ) , 8 )
]
return bytes(A )
if __name__ == "__main__":
import doctest
doctest.testmod()
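# Hedged, self-contained sketch (not part of the original file): the same 6-bit grouping idea,
# cross-checked against the standard-library ``base64`` module. All names below are illustrative
# only and do not call the functions above, whose names are ambiguous in this dump.
import base64 as _stdlib_base64

_TOY_ALPHABET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"

def _toy_b64encode(data: bytes) -> bytes:
    # Concatenate all bits, pad with zeros to a multiple of 6, then map each 6-bit group.
    bits = "".join(bin(byte)[2:].zfill(8) for byte in data)
    pad_bits = (-len(bits)) % 6
    bits += "0" * pad_bits
    encoded = "".join(_TOY_ALPHABET[int(bits[i : i + 6], 2)] for i in range(0, len(bits), 6))
    return (encoded + "=" * (pad_bits // 2)).encode()

assert _toy_b64encode(b"Hello, world!") == _stdlib_base64.b64encode(b"Hello, world!")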
| 85 | 1 |
"""simple docstring"""
from __future__ import annotations
def lowercase__(A ) ->int:
"""simple docstring"""
for i in range(1 , len(matrix[0] ) ):
matrix[0][i] += matrix[0][i - 1]
# preprocessing the first column
for i in range(1 , len(A ) ):
matrix[i][0] += matrix[i - 1][0]
# updating the path cost for current position
for i in range(1 , len(A ) ):
for j in range(1 , len(matrix[0] ) ):
matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] )
return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
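# Hedged usage sketch (not part of the original file): the classic 3x3 grid where only
# right/down moves are allowed; the cheapest path 1 -> 3 -> 1 -> 1 -> 1 costs 7. The in-place
# dynamic programme above is repeated inline because the function name is ambiguous in this dump.
_demo_grid = [[1, 3, 1], [1, 5, 1], [4, 2, 1]]
for _c in range(1, len(_demo_grid[0])):
    _demo_grid[0][_c] += _demo_grid[0][_c - 1]
for _r in range(1, len(_demo_grid)):
    _demo_grid[_r][0] += _demo_grid[_r - 1][0]
for _r in range(1, len(_demo_grid)):
    for _c in range(1, len(_demo_grid[0])):
        _demo_grid[_r][_c] += min(_demo_grid[_r - 1][_c], _demo_grid[_r][_c - 1])
assert _demo_grid[-1][-1] == 7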
| 85 |
"""simple docstring"""
from __future__ import annotations
def lowercase__(A ) ->list[int]: # This function is recursive
"""simple docstring"""
lowercase__ : int= len(A )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
lowercase__ : str= array[0]
lowercase__ : Optional[Any]= False
lowercase__ : Any= 1
lowercase__ : list[int]= []
while not is_found and i < array_length:
if array[i] < pivot:
lowercase__ : Union[str, Any]= True
lowercase__ : List[str]= [element for element in array[i:] if element >= array[i]]
lowercase__ : Union[str, Any]= longest_subsequence(A )
if len(A ) > len(A ):
lowercase__ : List[str]= temp_array
else:
i += 1
lowercase__ : List[str]= [element for element in array[1:] if element >= pivot]
lowercase__ : List[str]= [pivot, *longest_subsequence(A )]
if len(A ) > len(A ):
return temp_array
else:
return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
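# Hedged, self-contained alternative (not part of the original file): the standard O(n^2)
# dynamic programme for the same longest non-decreasing subsequence problem, kept separate
# because the recursive helper above is renamed ambiguously in this dump.
def _longest_non_decreasing_subsequence(values: list[int]) -> list[int]:
    if not values:
        return []
    # best[i] holds the longest non-decreasing subsequence that ends at index i.
    best = [[value] for value in values]
    for i in range(1, len(values)):
        for j in range(i):
            if values[j] <= values[i] and len(best[j]) + 1 > len(best[i]):
                best[i] = best[j] + [values[i]]
    return max(best, key=len)

assert _longest_non_decreasing_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80]) == [10, 22, 33, 50, 60, 80]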
| 85 | 1 |
"""simple docstring"""
def lowercase__(A = 100 ) ->int:
"""simple docstring"""
lowercase__ : List[str]= set()
lowercase__ : Optional[Any]= 0
lowercase__ : Any= n + 1 # maximum limit
for a in range(2 , A ):
for b in range(2 , A ):
lowercase__ : Any= a**b # calculates the current power
collect_powers.add(A ) # adds the result to the set
return len(A )
if __name__ == "__main__":
print("""Number of terms """, solution(int(str(input()).strip())))
| 85 |
"""simple docstring"""
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
a : int = argparse.ArgumentParser()
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--txt2img_unclip""",
default="""kakaobrain/karlo-v1-alpha""",
type=str,
required=False,
help="""The pretrained txt2img unclip.""",
)
a : List[str] = parser.parse_args()
a : List[str] = UnCLIPPipeline.from_pretrained(args.txtaimg_unclip)
a : Optional[Any] = CLIPImageProcessor()
a : List[str] = CLIPVisionModelWithProjection.from_pretrained("""openai/clip-vit-large-patch14""")
a : Tuple = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
| 85 | 1 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class __UpperCAmelCase( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= tempfile.mkdtemp()
# fmt: off
lowercase__ : Optional[Any]= ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
lowercase__ : Any= dict(zip(snake_case__ , range(len(snake_case__ ) ) ) )
lowercase__ : str= ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
lowercase__ : Union[str, Any]= {"unk_token": "<unk>"}
lowercase__ : str= os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowercase__ : Optional[int]= os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(snake_case__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(snake_case__ ) )
lowercase__ : List[str]= {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
"image_std": [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
lowercase__ : Optional[int]= os.path.join(self.tmpdirname , snake_case__ )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(snake_case__ , snake_case__ )
def UpperCAmelCase_ ( self , **snake_case__ ):
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname , **snake_case__ )
def UpperCAmelCase_ ( self , **snake_case__ ):
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **snake_case__ )
def UpperCAmelCase_ ( self , **snake_case__ ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname , **snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowercase__ : List[str]= [Image.fromarray(np.moveaxis(snake_case__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Any= self.get_tokenizer()
lowercase__ : Dict= self.get_rust_tokenizer()
lowercase__ : List[str]= self.get_image_processor()
lowercase__ : int= CLIPSegProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
processor_slow.save_pretrained(self.tmpdirname )
lowercase__ : Union[str, Any]= CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=snake_case__ )
lowercase__ : Union[str, Any]= CLIPSegProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
processor_fast.save_pretrained(self.tmpdirname )
lowercase__ : List[Any]= CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , snake_case__ )
self.assertIsInstance(processor_fast.tokenizer , snake_case__ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , snake_case__ )
self.assertIsInstance(processor_fast.image_processor , snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : str= CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowercase__ : Optional[int]= self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
lowercase__ : Union[str, Any]= self.get_image_processor(do_normalize=snake_case__ , padding_value=1.0 )
lowercase__ : List[str]= CLIPSegProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=snake_case__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : str= self.get_image_processor()
lowercase__ : Dict= self.get_tokenizer()
lowercase__ : int= CLIPSegProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
lowercase__ : Tuple= self.prepare_image_inputs()
lowercase__ : Union[str, Any]= image_processor(snake_case__ , return_tensors="np" )
lowercase__ : Any= processor(images=snake_case__ , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.get_image_processor()
lowercase__ : str= self.get_tokenizer()
lowercase__ : List[str]= CLIPSegProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
lowercase__ : Tuple= "lower newer"
lowercase__ : Union[str, Any]= processor(text=snake_case__ )
lowercase__ : List[Any]= tokenizer(snake_case__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Tuple= self.get_image_processor()
lowercase__ : Optional[int]= self.get_tokenizer()
lowercase__ : Tuple= CLIPSegProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
lowercase__ : Any= "lower newer"
lowercase__ : Any= self.prepare_image_inputs()
lowercase__ : List[str]= processor(text=snake_case__ , images=snake_case__ )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(snake_case__ ):
processor()
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[int]= self.get_image_processor()
lowercase__ : List[Any]= self.get_tokenizer()
lowercase__ : Optional[Any]= CLIPSegProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
lowercase__ : Any= self.prepare_image_inputs()
lowercase__ : List[Any]= self.prepare_image_inputs()
lowercase__ : Dict= processor(images=snake_case__ , visual_prompt=snake_case__ )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "conditional_pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(snake_case__ ):
processor()
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : int= self.get_image_processor()
lowercase__ : List[Any]= self.get_tokenizer()
lowercase__ : Any= CLIPSegProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
lowercase__ : List[Any]= [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowercase__ : List[Any]= processor.batch_decode(snake_case__ )
lowercase__ : Optional[int]= tokenizer.batch_decode(snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
| 85 |
"""simple docstring"""
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
a : Optional[Any] = {
"""bart""": (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""bert""": (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-base-cased-finetuned-mrpc""": (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""dpr""": (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""gpt2""": (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlnet""": (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlm""": (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlm-roberta""": (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""transfo-xl""": (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""openai-gpt""": (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""roberta""": (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""layoutlm""": (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""roberta-large-mnli""": (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""camembert""": (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""flaubert""": (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""distilbert""": (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""distilbert-base-distilled-squad""": (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""lxmert""": (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""lxmert-visual-feature-encoder""": (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""ctrl""": (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""albert""": (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""t5""": (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""electra""": (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""wav2vec2""": (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def lowercase__(A , A , A , A , A=False , A=True ) ->Union[str, Any]:
"""simple docstring"""
if model_type not in MODEL_CLASSES:
raise ValueError(f'''Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.''' )
lowercase__, lowercase__, lowercase__, lowercase__ : List[Any]= MODEL_CLASSES[model_type]
# Initialise TF model
if config_file in aws_config_map:
lowercase__ : List[str]= cached_file(A , A , force_download=not use_cached_models )
lowercase__ : List[Any]= config_class.from_json_file(A )
lowercase__ : Any= True
lowercase__ : List[str]= True
print(f'''Building TensorFlow model from configuration: {config}''' )
lowercase__ : Optional[int]= model_class(A )
# Load weights from tf checkpoint
if pytorch_checkpoint_path in aws_config_map.keys():
lowercase__ : List[str]= cached_file(
A , A , force_download=not use_cached_models )
# Load PyTorch checkpoint in tf2 model:
lowercase__ : Union[str, Any]= load_pytorch_checkpoint_in_tfa_model(A , A )
if compare_with_pt_model:
lowercase__ : Any= tf_model(tf_model.dummy_inputs , training=A ) # build the network
lowercase__ : Optional[Any]= torch.load(A , map_location="cpu" )
lowercase__ : Union[str, Any]= pt_model_class.from_pretrained(
pretrained_model_name_or_path=A , config=A , state_dict=A )
with torch.no_grad():
lowercase__ : str= pt_model(**pt_model.dummy_inputs )
lowercase__ : Tuple= pto[0].numpy()
lowercase__ : List[Any]= tfo[0].numpy()
lowercase__ : Any= np.amax(np.abs(np_pt - np_tf ) )
print(f'''Max absolute difference between models outputs {diff}''' )
assert diff <= 2e-2, f'''Error, model absolute difference is >2e-2: {diff}'''
# Save pytorch-model
print(f'''Save TensorFlow model to {tf_dump_path}''' )
tf_model.save_weights(A , save_format="h5" )
def lowercase__(A , A , A=None , A=None , A=False , A=False , A=False , A=False , ) ->List[Any]:
"""simple docstring"""
if args_model_type is None:
lowercase__ : Tuple= list(MODEL_CLASSES.keys() )
else:
lowercase__ : Optional[int]= [args_model_type]
for j, model_type in enumerate(A , start=1 ):
print("=" * 100 )
print(f''' Converting model type {j}/{len(A )}: {model_type}''' )
print("=" * 100 )
if model_type not in MODEL_CLASSES:
raise ValueError(f'''Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.''' )
lowercase__, lowercase__, lowercase__, lowercase__, lowercase__ : Optional[int]= MODEL_CLASSES[model_type]
if model_shortcut_names_or_path is None:
lowercase__ : int= list(aws_model_maps.keys() )
if config_shortcut_names_or_path is None:
lowercase__ : Any= model_shortcut_names_or_path
for i, (model_shortcut_name, config_shortcut_name) in enumerate(
zip(A , A ) , start=1 ):
print("-" * 100 )
if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
if not only_convert_finetuned_models:
print(f''' Skipping finetuned checkpoint {model_shortcut_name}''' )
continue
lowercase__ : Any= model_shortcut_name
elif only_convert_finetuned_models:
print(f''' Skipping non-finetuned checkpoint {model_shortcut_name}''' )
continue
print(
f''' Converting checkpoint {i}/{len(A )}: {model_shortcut_name} - model_type {model_type}''' )
print("-" * 100 )
if config_shortcut_name in aws_config_map:
lowercase__ : List[str]= cached_file(A , A , force_download=not use_cached_models )
else:
lowercase__ : Union[str, Any]= config_shortcut_name
if model_shortcut_name in aws_model_maps:
lowercase__ : str= cached_file(A , A , force_download=not use_cached_models )
else:
lowercase__ : Any= model_shortcut_name
if os.path.isfile(A ):
lowercase__ : Dict= "converted_model"
convert_pt_checkpoint_to_tf(
model_type=A , pytorch_checkpoint_path=A , config_file=A , tf_dump_path=os.path.join(A , model_shortcut_name + "-tf_model.h5" ) , compare_with_pt_model=A , )
if remove_cached_files:
os.remove(A )
os.remove(A )
if __name__ == "__main__":
a : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_dump_path""", default=None, type=str, required=True, help="""Path to the output Tensorflow dump file."""
)
parser.add_argument(
"""--model_type""",
default=None,
type=str,
help=(
F"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """
"""convert all the models from AWS."""
),
)
parser.add_argument(
"""--pytorch_checkpoint_path""",
default=None,
type=str,
help=(
"""Path to the PyTorch checkpoint path or shortcut name to download from AWS. """
"""If not given, will download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
help=(
"""The config json file corresponding to the pre-trained model. \n"""
"""This specifies the model architecture. If not given and """
"""--pytorch_checkpoint_path is not given or is a shortcut name """
"""use the configuration associated to the shortcut name on the AWS"""
),
)
parser.add_argument(
"""--compare_with_pt_model""", action="""store_true""", help="""Compare Tensorflow and PyTorch model predictions."""
)
parser.add_argument(
"""--use_cached_models""",
action="""store_true""",
help="""Use cached models if possible instead of updating to latest checkpoint versions.""",
)
parser.add_argument(
"""--remove_cached_files""",
action="""store_true""",
help="""Remove pytorch models after conversion (save memory when converting in batches).""",
)
parser.add_argument("""--only_convert_finetuned_models""", action="""store_true""", help="""Only convert finetuned models.""")
a : List[str] = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
| 85 | 1 |
"""simple docstring"""
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = (DPMSolverSDEScheduler,)
__lowerCamelCase = 10
def UpperCAmelCase_ ( self , **snake_case__ ):
'''simple docstring'''
lowercase__ : Union[str, Any]= {
"num_train_timesteps": 1100,
"beta_start": 0.00_01,
"beta_end": 0.02,
"beta_schedule": "linear",
"noise_sampler_seed": 0,
}
config.update(**snake_case__ )
return config
def UpperCAmelCase_ ( self ):
'''simple docstring'''
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] , [0.00_02, 0.0_02, 0.02] ):
self.check_over_configs(beta_start=snake_case__ , beta_end=snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : int= self.scheduler_classes[0]
lowercase__ : List[Any]= self.get_scheduler_config()
lowercase__ : Tuple= scheduler_class(**snake_case__ )
scheduler.set_timesteps(self.num_inference_steps )
lowercase__ : Union[str, Any]= self.dummy_model()
lowercase__ : Union[str, Any]= self.dummy_sample_deter * scheduler.init_noise_sigma
lowercase__ : List[Any]= sample.to(snake_case__ )
for i, t in enumerate(scheduler.timesteps ):
lowercase__ : str= scheduler.scale_model_input(snake_case__ , snake_case__ )
lowercase__ : List[str]= model(snake_case__ , snake_case__ )
lowercase__ : Dict= scheduler.step(snake_case__ , snake_case__ , snake_case__ )
lowercase__ : Tuple= output.prev_sample
lowercase__ : List[str]= torch.sum(torch.abs(snake_case__ ) )
lowercase__ : int= torch.mean(torch.abs(snake_case__ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_67.47_82_10_44_92_18_75 ) < 1e-2
assert abs(result_mean.item() - 0.21_78_70_59_64_56_52_77 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_71.59_35_21_11_81_64_06 ) < 1e-2
assert abs(result_mean.item() - 0.2_23_42_90_68_92_29_96_52 ) < 1e-3
else:
assert abs(result_sum.item() - 1_62.52_38_34_22_85_15_62 ) < 1e-2
assert abs(result_mean.item() - 0.2_11_61_95_70_85_13_26 ) < 1e-3
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : str= self.scheduler_classes[0]
lowercase__ : Dict= self.get_scheduler_config(prediction_type="v_prediction" )
lowercase__ : Dict= scheduler_class(**snake_case__ )
scheduler.set_timesteps(self.num_inference_steps )
lowercase__ : Optional[Any]= self.dummy_model()
lowercase__ : Dict= self.dummy_sample_deter * scheduler.init_noise_sigma
lowercase__ : Optional[Any]= sample.to(snake_case__ )
for i, t in enumerate(scheduler.timesteps ):
lowercase__ : Optional[int]= scheduler.scale_model_input(snake_case__ , snake_case__ )
lowercase__ : List[str]= model(snake_case__ , snake_case__ )
lowercase__ : Optional[int]= scheduler.step(snake_case__ , snake_case__ , snake_case__ )
lowercase__ : str= output.prev_sample
lowercase__ : int= torch.sum(torch.abs(snake_case__ ) )
lowercase__ : Optional[Any]= torch.mean(torch.abs(snake_case__ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_24.77_14_92_00_43_94_53 ) < 1e-2
assert abs(result_mean.item() - 0.1_62_26_28_90_14_81_62_84 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_28.1_66_33_60_59_57_03 ) < 1e-2
assert abs(result_mean.item() - 0.1_66_88_32_60_01_16_72_97 ) < 1e-3
else:
assert abs(result_sum.item() - 1_19.8_48_75_48_82_81_25 ) < 1e-2
assert abs(result_mean.item() - 0.15_60_53_06_62_53_66_21 ) < 1e-3
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Union[str, Any]= self.scheduler_classes[0]
lowercase__ : str= self.get_scheduler_config()
lowercase__ : List[Any]= scheduler_class(**snake_case__ )
scheduler.set_timesteps(self.num_inference_steps , device=snake_case__ )
lowercase__ : Optional[Any]= self.dummy_model()
lowercase__ : List[Any]= self.dummy_sample_deter.to(snake_case__ ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
lowercase__ : Optional[int]= scheduler.scale_model_input(snake_case__ , snake_case__ )
lowercase__ : str= model(snake_case__ , snake_case__ )
lowercase__ : Any= scheduler.step(snake_case__ , snake_case__ , snake_case__ )
lowercase__ : Tuple= output.prev_sample
lowercase__ : Tuple= torch.sum(torch.abs(snake_case__ ) )
lowercase__ : Optional[Any]= torch.mean(torch.abs(snake_case__ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_67.46_95_73_97_46_09_38 ) < 1e-2
assert abs(result_mean.item() - 0.2_18_05_93_46_07_98_26_35 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_71.59_35_36_37_69_53_12 ) < 1e-2
assert abs(result_mean.item() - 0.2_23_42_90_83_82_41_57_71 ) < 1e-3
else:
assert abs(result_sum.item() - 1_62.52_38_34_22_85_15_62 ) < 1e-2
assert abs(result_mean.item() - 0.2_11_61_95_70_85_13_26 ) < 1e-3
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Any= self.scheduler_classes[0]
lowercase__ : Tuple= self.get_scheduler_config()
lowercase__ : Optional[int]= scheduler_class(**snake_case__ , use_karras_sigmas=snake_case__ )
scheduler.set_timesteps(self.num_inference_steps , device=snake_case__ )
lowercase__ : Optional[Any]= self.dummy_model()
lowercase__ : Any= self.dummy_sample_deter.to(snake_case__ ) * scheduler.init_noise_sigma
lowercase__ : List[Any]= sample.to(snake_case__ )
for t in scheduler.timesteps:
lowercase__ : str= scheduler.scale_model_input(snake_case__ , snake_case__ )
lowercase__ : List[str]= model(snake_case__ , snake_case__ )
lowercase__ : str= scheduler.step(snake_case__ , snake_case__ , snake_case__ )
lowercase__ : str= output.prev_sample
lowercase__ : List[str]= torch.sum(torch.abs(snake_case__ ) )
lowercase__ : List[str]= torch.mean(torch.abs(snake_case__ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_76.66_97_41_35_74_21_88 ) < 1e-2
assert abs(result_mean.item() - 0.2_30_03_87_27_30_98_18_11 ) < 1e-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_77.63_65_35_64_45_31_25 ) < 1e-2
assert abs(result_mean.item() - 0.2_30_03_87_27_30_98_18_11 ) < 1e-2
else:
assert abs(result_sum.item() - 1_70.3_13_52_23_38_86_72 ) < 1e-2
assert abs(result_mean.item() - 0.2_30_03_87_27_30_98_18_11 ) < 1e-2
| 85 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
a : List[str] = {"""processing_wav2vec2_with_lm""": ["""Wav2Vec2ProcessorWithLM"""]}
if TYPE_CHECKING:
from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
import sys
a : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 85 | 1 |
"""simple docstring"""
from string import ascii_lowercase, ascii_uppercase
def lowercase__(A ) ->str:
"""simple docstring"""
if not sentence:
return ""
lowercase__ : str= dict(zip(A , A ) )
return lower_to_upper.get(sentence[0] , sentence[0] ) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
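# Hedged illustration (not part of the original file): the zip-based lookup table maps each
# lowercase letter to its uppercase counterpart, so only the first character is capitalised
# and non-letters fall back to themselves via the second argument of .get().
_lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
assert _lower_to_upper.get("h", "h") + "ello world" == "Hello world"
assert _lower_to_upper.get("1", "1") == "1"  # digits pass through unchanged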
| 85 |
"""simple docstring"""
def lowercase__(A ) ->list:
"""simple docstring"""
if n_term == "":
return []
lowercase__ : list= []
for temp in range(int(A ) ):
series.append(f'''1/{temp + 1}''' if series else "1" )
return series
if __name__ == "__main__":
a : Dict = input("""Enter the last number (nth term) of the Harmonic Series""")
print("""Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n""")
print(harmonic_series(nth_term))
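# Hedged illustration (not part of the original file): the same construction for the first
# five terms of the harmonic series, 1 + 1/2 + 1/3 + 1/4 + 1/5.
_first_five = []
for _k in range(5):
    _first_five.append(f"1/{_k + 1}" if _first_five else "1")
assert _first_five == ["1", "1/2", "1/3", "1/4", "1/5"]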
| 85 | 1 |
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
a : Any = get_logger()
a : Optional[dict] = None
class __UpperCAmelCase( TensorFormatter[Mapping, "jax.Array", Mapping] ):
"""simple docstring"""
def __init__( self , snake_case__=None , snake_case__=None , **snake_case__ ):
'''simple docstring'''
super().__init__(features=snake_case__ )
import jax
from jaxlib.xla_client import Device
if isinstance(snake_case__ , snake_case__ ):
raise ValueError(
F'''Expected {device} to be a `str` not {type(snake_case__ )}, as `jaxlib.xla_extension.Device` '''
"is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
"the device with `str()` to get its string identifier that will be internally mapped "
"to the actual `jaxlib.xla_extension.Device`." )
lowercase__ : Optional[Any]= device if isinstance(snake_case__ , snake_case__ ) else str(jax.devices()[0] )
# using a global variable since `jaxlib.xla_extension.Device` is not serializable with either
# `pickle` or `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
lowercase__ : Optional[int]= self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
F'''Device with string identifier {self.device} not listed among the available '''
F'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '''
F'''device: {str(jax.devices()[0] )}.''' )
lowercase__ : Optional[int]= str(jax.devices()[0] )
lowercase__ : Any= jnp_array_kwargs
@staticmethod
def UpperCAmelCase_ ( ):
'''simple docstring'''
import jax
return {str(snake_case__ ): device for device in jax.devices()}
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
import jax
import jax.numpy as jnp
if isinstance(snake_case__ , snake_case__ ) and column:
if all(
isinstance(snake_case__ , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(snake_case__ , axis=0 )
return column
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
import jax
import jax.numpy as jnp
if isinstance(snake_case__ , (str, bytes, type(snake_case__ )) ):
return value
elif isinstance(snake_case__ , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
lowercase__ : Dict= {}
if isinstance(snake_case__ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_xaa:
lowercase__ : Dict= {"dtype": jnp.intaa}
else:
lowercase__ : List[str]= {"dtype": jnp.intaa}
elif isinstance(snake_case__ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
lowercase__ : Optional[int]= {"dtype": jnp.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(snake_case__ , PIL.Image.Image ):
lowercase__ : Optional[int]= np.asarray(snake_case__ )
# using a global variable since `jaxlib.xla_extension.Device` is not serializable with either
# `pickle` or `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
lowercase__ : Optional[Any]= self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(snake_case__ , **{**default_dtype, **self.jnp_array_kwargs} )
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(snake_case__ , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(snake_case__ , "__array__" ) and not isinstance(snake_case__ , jax.Array ):
lowercase__ : Tuple= data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(snake_case__ , np.ndarray ):
if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(snake_case__ ) for substruct in data_struct] )
elif isinstance(snake_case__ , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(snake_case__ ) for substruct in data_struct] )
return self._tensorize(snake_case__ )
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
return map_nested(self._recursive_tensorize , snake_case__ , map_list=snake_case__ )
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
lowercase__ : Any= self.numpy_arrow_extractor().extract_row(snake_case__ )
lowercase__ : List[str]= self.python_features_decoder.decode_row(snake_case__ )
return self.recursive_tensorize(snake_case__ )
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
lowercase__ : Dict= self.numpy_arrow_extractor().extract_column(snake_case__ )
lowercase__ : Union[str, Any]= self.python_features_decoder.decode_column(snake_case__ , pa_table.column_names[0] )
lowercase__ : Tuple= self.recursive_tensorize(snake_case__ )
lowercase__ : Union[str, Any]= self._consolidate(snake_case__ )
return column
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.numpy_arrow_extractor().extract_batch(snake_case__ )
lowercase__ : Tuple= self.python_features_decoder.decode_batch(snake_case__ )
lowercase__ : List[str]= self.recursive_tensorize(snake_case__ )
for column_name in batch:
lowercase__ : Tuple= self._consolidate(batch[column_name] )
return batch
| 85 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a : int = logging.get_logger(__name__)
a : str = {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""",
"""google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""",
"""google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = "big_bird"
def __init__( self , snake_case__=50358 , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__="gelu_new" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=4096 , snake_case__=2 , snake_case__=0.02 , snake_case__=1e-12 , snake_case__=True , snake_case__=0 , snake_case__=1 , snake_case__=2 , snake_case__=66 , snake_case__="block_sparse" , snake_case__=True , snake_case__=False , snake_case__=64 , snake_case__=3 , snake_case__=None , **snake_case__ , ):
'''simple docstring'''
super().__init__(
pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , sep_token_id=snake_case__ , **snake_case__ , )
lowercase__ : Dict= vocab_size
lowercase__ : Optional[int]= max_position_embeddings
lowercase__ : List[Any]= hidden_size
lowercase__ : List[str]= num_hidden_layers
lowercase__ : List[str]= num_attention_heads
lowercase__ : Optional[int]= intermediate_size
lowercase__ : Optional[int]= hidden_act
lowercase__ : Tuple= hidden_dropout_prob
lowercase__ : int= attention_probs_dropout_prob
lowercase__ : int= initializer_range
lowercase__ : List[Any]= type_vocab_size
lowercase__ : Union[str, Any]= layer_norm_eps
lowercase__ : Optional[Any]= use_cache
lowercase__ : Union[str, Any]= rescale_embeddings
lowercase__ : Union[str, Any]= attention_type
lowercase__ : Any= use_bias
lowercase__ : List[Any]= block_size
lowercase__ : Optional[Any]= num_random_blocks
lowercase__ : Optional[int]= classifier_dropout
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
@property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
if self.task == "multiple-choice":
lowercase__ : List[Any]= {0: "batch", 1: "choice", 2: "sequence"}
else:
lowercase__ : Tuple= {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 85 | 1 |
"""simple docstring"""
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="""%(message)s""")
def lowercase__(A ) ->np.ndarray:
"""simple docstring"""
return input_array.reshape((input_array.size, 1) )
def lowercase__(A , A , A ) ->np.ndarray:
"""simple docstring"""
lowercase__ : List[Any]= np.nan
for i in range(A ):
lowercase__ : int= features[:, labels == i]
lowercase__ : int= data.mean(1 )
# Centralize the data of class i
lowercase__ : Any= data - column_reshape(A )
if i > 0:
# If covariance_sum is not None
covariance_sum += np.dot(A , centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
lowercase__ : Optional[Any]= np.dot(A , centered_data.T )
return covariance_sum / features.shape[1]
def lowercase__(A , A , A ) ->np.ndarray:
"""simple docstring"""
lowercase__ : Dict= features.mean(1 )
lowercase__ : Optional[Any]= np.nan
for i in range(A ):
lowercase__ : Optional[Any]= features[:, labels == i]
lowercase__ : List[str]= data.shape[1]
lowercase__ : Optional[Any]= data.mean(1 )
if i > 0:
# If covariance_sum is not None
covariance_sum += device_data * np.dot(
column_reshape(A ) - column_reshape(A ) , (column_reshape(A ) - column_reshape(A )).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
lowercase__ : List[str]= device_data * np.dot(
column_reshape(A ) - column_reshape(A ) , (column_reshape(A ) - column_reshape(A )).T , )
return covariance_sum / features.shape[1]
def lowercase__(A , A ) ->np.ndarray:
"""simple docstring"""
if features.any():
lowercase__ : Optional[Any]= features.mean(1 )
# Center the dataset
lowercase__ : Optional[Any]= features - np.reshape(A , (data_mean.size, 1) )
lowercase__ : str= np.dot(A , centered_data.T ) / features.shape[1]
lowercase__, lowercase__ : List[str]= np.linalg.eigh(A )
# Take all the columns in the reverse order (-1), and then takes only the first
lowercase__ : Optional[Any]= eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
lowercase__ : Optional[int]= np.dot(filtered_eigenvectors.T , A )
logging.info("Principal Component Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=A )
logging.error("Dataset empty" )
raise AssertionError
def lowercase__(A , A , A , A ) ->np.ndarray:
"""simple docstring"""
assert classes > dimensions
# Check if features have been already loaded
if features.any():
lowercase__, lowercase__ : List[Any]= eigh(
covariance_between_classes(A , A , A ) , covariance_within_classes(A , A , A ) , )
lowercase__ : List[str]= eigenvectors[:, ::-1][:, :dimensions]
lowercase__, lowercase__, lowercase__ : Any= np.linalg.svd(A )
lowercase__ : Optional[int]= svd_matrix[:, 0:dimensions]
lowercase__ : Tuple= np.dot(filtered_svd_matrix.T , A )
logging.info("Linear Discriminant Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=A )
logging.error("Dataset empty" )
raise AssertionError
def lowercase__() ->None:
"""simple docstring"""
lowercase__ : List[str]= np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
lowercase__ : List[Any]= np.array([0, 0, 0, 1, 1] )
lowercase__ : Union[str, Any]= 2
lowercase__ : Dict= 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(A ) as error_info:
lowercase__ : Optional[int]= linear_discriminant_analysis(
A , A , A , A )
if isinstance(A , np.ndarray ):
raise AssertionError(
"Did not raise AssertionError for dimensions > classes" )
assert error_info.type is AssertionError
def lowercase__() ->None:
"""simple docstring"""
lowercase__ : Union[str, Any]= np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
lowercase__ : Any= 2
lowercase__ : Any= np.array([[6.92_820_323, 8.66_025_404, 10.39_230_485], [3.0, 3.0, 3.0]] )
with pytest.raises(A ) as error_info:
lowercase__ : str= principal_component_analysis(A , A )
if not np.allclose(A , A ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
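# Hedged numpy-only sketch (not part of the original file) of the same PCA steps on a toy
# 2-feature / 4-sample matrix stored column-wise, keeping a single principal component.
_demo_features = np.array([[1.0, 2.0, 3.0, 4.0], [2.0, 4.0, 6.0, 8.0]])
_centered = _demo_features - _demo_features.mean(1, keepdims=True)
_covariance = _centered @ _centered.T / _demo_features.shape[1]
_eigenvalues, _eigenvectors = np.linalg.eigh(_covariance)
# eigh returns ascending eigenvalues, so reverse the columns before truncating.
_projection = _eigenvectors[:, ::-1][:, :1].T @ _demo_features
assert _projection.shape == (1, 4)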
| 85 |
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 85 | 1 |
"""simple docstring"""
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
a : List[str] = logging.getLogger(__name__)
a : Union[str, Any] = """pytorch_model.bin"""
@dataclasses.dataclass
class __UpperCAmelCase:
"""simple docstring"""
__lowerCamelCase = dataclasses.field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."} )
__lowerCamelCase = dataclasses.field(
default=SCREAMING_SNAKE_CASE__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."} , )
@dataclasses.dataclass
class __UpperCAmelCase:
"""simple docstring"""
__lowerCamelCase = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."} )
__lowerCamelCase = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."} )
__lowerCamelCase = dataclasses.field(
default=SCREAMING_SNAKE_CASE__ , metadata={"help": "A csv or a json file containing the validation data."} )
__lowerCamelCase = dataclasses.field(
default=SCREAMING_SNAKE_CASE__ , metadata={"help": "The name of the task to train on."} , )
__lowerCamelCase = dataclasses.field(
default=SCREAMING_SNAKE_CASE__ , metadata={"help": "The list of labels for the task."} )
@dataclasses.dataclass
class __UpperCAmelCase:
"""simple docstring"""
__lowerCamelCase = dataclasses.field(
metadata={"help": "The output directory where the model predictions and checkpoints will be written."} )
__lowerCamelCase = dataclasses.field(
default="accuracy" , metadata={"help": "The evaluation metric used for the task."} )
__lowerCamelCase = dataclasses.field(
default="no" , metadata={
"help": "The evaluation strategy to adopt during training. Possible values are: [\"no\", \"step\", \"epoch]"
} , )
__lowerCamelCase = dataclasses.field(
default=10 , metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."} , )
__lowerCamelCase = dataclasses.field(
default=0.0 , metadata={
"help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
} , )
__lowerCamelCase = dataclasses.field(
default=SCREAMING_SNAKE_CASE__ , metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."} , )
__lowerCamelCase = dataclasses.field(
default=SCREAMING_SNAKE_CASE__ , metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."} , )
__lowerCamelCase = dataclasses.field(
default=SCREAMING_SNAKE_CASE__ , metadata={"help": "Whether to fine-tune on labeled data after pseudo training."} , )
__lowerCamelCase = dataclasses.field(
default=0.0 , metadata={"help": "Confidence threshold for pseudo-labeled data filtering."} , )
__lowerCamelCase = dataclasses.field(
default=100 , metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."} , )
__lowerCamelCase = dataclasses.field(
default=SCREAMING_SNAKE_CASE__ , metadata={"help": "Random seed for initialization."} , )
def lowercase__(A , A , A , A , A , A ) ->str:
"""simple docstring"""
lowercase__ : List[Any]= datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
if args.do_filter_by_confidence:
lowercase__ : int= dataset.filter(lambda A : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
lowercase__ : Optional[int]= int(eval_result * len(A ) )
print(A )
lowercase__ : Tuple= dataset.sort("probability" , reverse=A )
lowercase__ : Union[str, Any]= dataset.select(range(A ) )
lowercase__ : Tuple= dataset.remove_columns(["label", "probability"] )
lowercase__ : str= dataset.rename_column("prediction" , "label" )
lowercase__ : List[str]= dataset.map(lambda A : {"label": idalabel[example["label"]]} )
lowercase__ : Optional[Any]= dataset.shuffle(seed=args.seed )
lowercase__ : int= os.path.join(A , f'''train_pseudo.{args.data_file_extension}''' )
if args.data_file_extension == "csv":
dataset.to_csv(A , index=A )
else:
dataset.to_json(A )
def lowercase__(A , A , A , A , **A ) ->List[str]:
"""simple docstring"""
lowercase__ : Any= Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
lowercase__ : Optional[Any]= STModelArguments(model_name_or_path=A )
lowercase__ : Any= STDataArguments(train_file=A , infer_file=A )
lowercase__ : str= STTrainingArguments(output_dir=A )
lowercase__ : Optional[Any]= argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(A ).items():
setattr(A , A , A )
for key, value in kwargs.items():
if hasattr(A , A ):
setattr(A , A , A )
# Sanity checks
lowercase__ : Tuple= {}
lowercase__ : Tuple= None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
lowercase__ : int= args.train_file
lowercase__ : int= args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
lowercase__ : Dict= args.eval_file
for key in data_files:
lowercase__ : str= data_files[key].split("." )[-1]
assert extension in ["csv", "json"], f'''`{key}_file` should be a csv or a json file.'''
if args.data_file_extension is None:
lowercase__ : int= extension
else:
assert extension == args.data_file_extension, f'''`{key}_file` should be a {args.data_file_extension} file`.'''
assert (
args.eval_metric in datasets.list_metrics()
), f'''{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.'''
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info("Creating the initial data directory for self-training..." )
lowercase__ : List[str]= f'''{args.output_dir}/self-train_iter-{{}}'''.format
lowercase__ : Tuple= data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=A )
os.makedirs(A , exist_ok=A )
accelerator.wait_for_everyone()
lowercase__ : List[str]= None
lowercase__ : int= None
lowercase__ : Any= 0
lowercase__ : Any= False
# Show the progress bar
lowercase__ : Any= tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
lowercase__ : Union[str, Any]= data_dir_format(A )
assert os.path.exists(A )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
lowercase__ : Optional[Any]= os.path.join(A , "stage-1" )
lowercase__ : Tuple= {
"accelerator": accelerator,
"model_name_or_path": args.model_name_or_path,
"cache_dir": args.cache_dir,
"do_train": True,
"train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
"do_eval": True if args.eval_file is not None else False,
"eval_file": data_files["eval"],
"do_predict": True,
"infer_file": data_files["infer"],
"task_name": args.task_name,
"label_list": args.label_list,
"output_dir": current_output_dir,
"eval_metric": args.eval_metric,
"evaluation_strategy": args.evaluation_strategy,
"early_stopping_patience": args.early_stopping_patience,
"early_stopping_threshold": args.early_stopping_threshold,
"seed": args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(A , A ):
arguments_dict.update({key: value} )
lowercase__ : List[str]= os.path.join(A , "best-checkpoint" , A )
if os.path.exists(A ):
logger.info(
"Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1." , A , A , )
else:
logger.info("***** Running self-training: iteration: %d, stage: 1 *****" , A )
finetune(**A )
accelerator.wait_for_everyone()
assert os.path.exists(A )
logger.info("Self-training job completed: iteration: %d, stage: 1." , A )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
lowercase__ : Union[str, Any]= os.path.join(A , "best-checkpoint" )
lowercase__ : int= os.path.join(A , "stage-2" )
# Update arguments_dict
lowercase__ : Dict= model_path
lowercase__ : Dict= data_files["train"]
lowercase__ : Optional[int]= current_output_dir
lowercase__ : Optional[Any]= os.path.join(A , "best-checkpoint" , A )
if os.path.exists(A ):
logger.info(
"Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2." , A , A , )
else:
logger.info("***** Running self-training: iteration: %d, stage: 2 *****" , A )
finetune(**A )
accelerator.wait_for_everyone()
assert os.path.exists(A )
logger.info("Self-training job completed: iteration: %d, stage: 2." , A )
lowercase__ : Union[str, Any]= iteration
lowercase__ : str= data_dir_format(iteration + 1 )
lowercase__ : Optional[Any]= AutoConfig.from_pretrained(os.path.join(A , "best-checkpoint" ) )
lowercase__ : Dict= config.idalabel
lowercase__ : int= os.path.join(A , "eval_results_best-checkpoint.json" )
lowercase__ : Union[str, Any]= os.path.join(A , "test_results_best-checkpoint.json" )
assert os.path.exists(A )
with open(A , "r" ) as f:
lowercase__ : Optional[int]= float(json.load(A )[args.eval_metric] )
lowercase__ : List[Any]= os.path.join(A , "infer_output_best-checkpoint.csv" )
assert os.path.exists(A )
# Loading the dataset from local csv or json files.
lowercase__ : Tuple= load_dataset(args.data_file_extension , data_files={"data": data_files["infer"]} )["data"]
lowercase__ : Any= load_dataset("csv" , data_files={"data": infer_output_file} )["data"]
if accelerator.is_main_process:
os.makedirs(A , exist_ok=A )
shutil.copy(A , os.path.join(A , f'''eval_results_iter-{iteration}.json''' ) )
if os.path.exists(A ):
shutil.copy(A , os.path.join(A , f'''test_results_iter-{iteration}.json''' ) )
create_pseudo_labeled_data(A , A , A , A , A , A )
accelerator.wait_for_everyone()
lowercase__ : List[Any]= os.path.join(A , f'''train_pseudo.{args.data_file_extension}''' )
if args.evaluation_strategy != IntervalStrategy.NO.value:
lowercase__ : List[Any]= eval_result
if best_iteration is None:
lowercase__ : Optional[Any]= new_iteration
lowercase__ : str= new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
lowercase__ : int= new_iteration
lowercase__ : str= new_eval_result
lowercase__ : str= 0
else:
if new_eval_result == best_eval_result:
lowercase__ : List[Any]= new_iteration
lowercase__ : Dict= new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
lowercase__ : Optional[int]= True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info("Best iteration: %d" , A )
logger.info("Best evaluation result: %s = %f" , args.eval_metric , A )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(A , f'''eval_results_iter-{iteration}.json''' ) , os.path.join(A , "eval_results_best-iteration.json" ) , )
else:
# Assume that the last iteration is the best
logger.info("Best iteration: %d" , args.max_selftrain_iterations - 1 )
logger.info("Best evaluation result: %s = %f" , args.eval_metric , A )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(A , f'''eval_results_iter-{args.max_selftrain_iterations - 1}.json''' ) , os.path.join(A , "eval_results_best-iteration.json" ) , )
| 85 |
"""simple docstring"""
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def lowercase__(A , A ) ->List[Any]:
"""simple docstring"""
lowercase__ : str= []
for part_id in partition_order:
lowercase__ : int= df.where(f'''SPARK_PARTITION_ID() = {part_id}''' ).collect()
for row_idx, row in enumerate(A ):
expected_row_ids_and_row_dicts.append((f'''{part_id}_{row_idx}''', row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def lowercase__() ->str:
"""simple docstring"""
lowercase__ : Optional[Any]= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : Tuple= spark.range(100 ).repartition(1 )
lowercase__ : Dict= Spark(A )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def lowercase__() ->Tuple:
"""simple docstring"""
lowercase__ : Union[str, Any]= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : Dict= spark.range(10 ).repartition(2 )
lowercase__ : Optional[Any]= [1, 0]
lowercase__ : List[str]= _generate_iterable_examples(A , A ) # Reverse the partitions.
lowercase__ : int= _get_expected_row_ids_and_row_dicts_for_partition_order(A , A )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
lowercase__, lowercase__ : Any= expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowercase__() ->int:
"""simple docstring"""
lowercase__ : int= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : Dict= spark.range(10 ).repartition(1 )
lowercase__ : str= SparkExamplesIterable(A )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(A ):
assert row_id == f'''0_{i}'''
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def lowercase__() ->str:
"""simple docstring"""
lowercase__ : List[str]= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : int= spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch("numpy.random.Generator" ) as generator_mock:
lowercase__ : Optional[Any]= lambda A : x.reverse()
lowercase__ : Tuple= _get_expected_row_ids_and_row_dicts_for_partition_order(A , [2, 1, 0] )
lowercase__ : List[str]= SparkExamplesIterable(A ).shuffle_data_sources(A )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(A ):
lowercase__, lowercase__ : str= expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowercase__() ->Any:
"""simple docstring"""
lowercase__ : Dict= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : Union[str, Any]= spark.range(20 ).repartition(4 )
# Partitions 0 and 2
lowercase__ : Optional[int]= SparkExamplesIterable(A ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
lowercase__ : Union[str, Any]= _get_expected_row_ids_and_row_dicts_for_partition_order(A , [0, 2] )
for i, (row_id, row_dict) in enumerate(A ):
lowercase__, lowercase__ : Tuple= expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
lowercase__ : Tuple= SparkExamplesIterable(A ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
lowercase__ : List[Any]= _get_expected_row_ids_and_row_dicts_for_partition_order(A , [1, 3] )
for i, (row_id, row_dict) in enumerate(A ):
lowercase__, lowercase__ : Dict= expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowercase__() ->Tuple:
"""simple docstring"""
lowercase__ : Any= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : Tuple= spark.range(100 ).repartition(1 )
lowercase__ : Optional[int]= Spark(A )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
| 85 | 1 |
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = ["image_processor", "tokenizer"]
__lowerCamelCase = "BlipImageProcessor"
__lowerCamelCase = "AutoTokenizer"
def __init__( self , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
super().__init__(snake_case__ , snake_case__ )
# add QFormer tokenizer
lowercase__ : Tuple= qformer_tokenizer
def __call__( self , snake_case__ = None , snake_case__ = None , snake_case__ = True , snake_case__ = False , snake_case__ = None , snake_case__ = None , snake_case__ = 0 , snake_case__ = None , snake_case__ = None , snake_case__ = False , snake_case__ = False , snake_case__ = False , snake_case__ = False , snake_case__ = False , snake_case__ = True , snake_case__ = None , **snake_case__ , ):
'''simple docstring'''
if images is None and text is None:
raise ValueError("You have to specify at least images or text." )
lowercase__ : Optional[Any]= BatchFeature()
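        # The text is tokenized twice below: once with the main tokenizer and once with the Q-Former
        # tokenizer (whose input ids and attention mask are kept as separate entries); the image
        # processor output is merged into the encoding last.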
if text is not None:
lowercase__ : Optional[Any]= self.tokenizer(
text=snake_case__ , add_special_tokens=snake_case__ , padding=snake_case__ , truncation=snake_case__ , max_length=snake_case__ , stride=snake_case__ , pad_to_multiple_of=snake_case__ , return_attention_mask=snake_case__ , return_overflowing_tokens=snake_case__ , return_special_tokens_mask=snake_case__ , return_offsets_mapping=snake_case__ , return_token_type_ids=snake_case__ , return_length=snake_case__ , verbose=snake_case__ , return_tensors=snake_case__ , **snake_case__ , )
encoding.update(snake_case__ )
lowercase__ : List[Any]= self.qformer_tokenizer(
text=snake_case__ , add_special_tokens=snake_case__ , padding=snake_case__ , truncation=snake_case__ , max_length=snake_case__ , stride=snake_case__ , pad_to_multiple_of=snake_case__ , return_attention_mask=snake_case__ , return_overflowing_tokens=snake_case__ , return_special_tokens_mask=snake_case__ , return_offsets_mapping=snake_case__ , return_token_type_ids=snake_case__ , return_length=snake_case__ , verbose=snake_case__ , return_tensors=snake_case__ , **snake_case__ , )
lowercase__ : Optional[int]= qformer_text_encoding.pop("input_ids" )
lowercase__ : int= qformer_text_encoding.pop("attention_mask" )
if images is not None:
lowercase__ : int= self.image_processor(snake_case__ , return_tensors=snake_case__ )
encoding.update(snake_case__ )
return encoding
def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ )
def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
return self.tokenizer.decode(*snake_case__ , **snake_case__ )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : str= self.tokenizer.model_input_names
lowercase__ : Any= self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def UpperCAmelCase_ ( self , snake_case__ , **snake_case__ ):
'''simple docstring'''
if os.path.isfile(snake_case__ ):
raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''' )
os.makedirs(snake_case__ , exist_ok=snake_case__ )
lowercase__ : Tuple= os.path.join(snake_case__ , "qformer_tokenizer" )
self.qformer_tokenizer.save_pretrained(snake_case__ )
return super().save_pretrained(snake_case__ , **snake_case__ )
@classmethod
def UpperCAmelCase_ ( cls , snake_case__ , **snake_case__ ):
'''simple docstring'''
lowercase__ : List[str]= AutoTokenizer.from_pretrained(snake_case__ , subfolder="qformer_tokenizer" )
lowercase__ : str= cls._get_arguments_from_pretrained(snake_case__ , **snake_case__ )
args.append(snake_case__ )
return cls(*snake_case__ )
| 85 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__=13 , snake_case__=7 , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=False , snake_case__=False , snake_case__=False , snake_case__=2 , snake_case__=99 , snake_case__=0 , snake_case__=32 , snake_case__=5 , snake_case__=4 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=12 , snake_case__=2 , snake_case__=0.02 , snake_case__=3 , snake_case__=4 , snake_case__="last" , snake_case__=None , snake_case__=None , ):
'''simple docstring'''
lowercase__ : Optional[int]= parent
lowercase__ : Tuple= batch_size
lowercase__ : Tuple= seq_length
lowercase__ : str= is_training
lowercase__ : str= use_input_lengths
lowercase__ : Any= use_token_type_ids
lowercase__ : List[Any]= use_labels
lowercase__ : Optional[int]= gelu_activation
lowercase__ : str= sinusoidal_embeddings
lowercase__ : List[str]= causal
lowercase__ : Any= asm
lowercase__ : Optional[int]= n_langs
lowercase__ : Union[str, Any]= vocab_size
lowercase__ : int= n_special
lowercase__ : Any= hidden_size
lowercase__ : int= num_hidden_layers
lowercase__ : List[str]= num_attention_heads
lowercase__ : List[str]= hidden_dropout_prob
lowercase__ : str= attention_probs_dropout_prob
lowercase__ : Any= max_position_embeddings
lowercase__ : List[Any]= type_vocab_size
lowercase__ : int= type_sequence_label_size
lowercase__ : Any= initializer_range
lowercase__ : Optional[int]= num_labels
lowercase__ : Union[str, Any]= num_choices
lowercase__ : List[Any]= summary_type
lowercase__ : Optional[int]= use_proj
lowercase__ : int= scope
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : Dict= random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : Tuple= None
if self.use_input_lengths:
lowercase__ : List[Any]= (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowercase__ : Tuple= None
if self.use_token_type_ids:
lowercase__ : Any= ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
lowercase__ : str= None
lowercase__ : Tuple= None
lowercase__ : Dict= None
if self.use_labels:
lowercase__ : Optional[Any]= ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : Optional[Any]= ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__ : Tuple= ids_tensor([self.batch_size] , 2 ).float()
lowercase__ : Tuple= ids_tensor([self.batch_size] , self.num_choices )
lowercase__ : List[Any]= self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : Any= FlaubertModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : str= model(snake_case__ , lengths=snake_case__ , langs=snake_case__ )
lowercase__ : str= model(snake_case__ , langs=snake_case__ )
lowercase__ : Any= model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : str= FlaubertWithLMHeadModel(snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : Optional[Any]= model(snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : str= FlaubertForQuestionAnsweringSimple(snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : List[str]= model(snake_case__ )
lowercase__ : Dict= model(snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : List[Any]= FlaubertForQuestionAnswering(snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : Dict= model(snake_case__ )
lowercase__ : Any= model(
snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , cls_index=snake_case__ , is_impossible=snake_case__ , p_mask=snake_case__ , )
lowercase__ : List[str]= model(
snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , cls_index=snake_case__ , is_impossible=snake_case__ , )
((lowercase__), ) : Optional[Any]= result_with_labels.to_tuple()
lowercase__ : Union[str, Any]= model(snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ )
((lowercase__), ) : List[Any]= result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : List[str]= FlaubertForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : Optional[Any]= model(snake_case__ )
lowercase__ : Optional[Any]= model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : List[Any]= self.num_labels
lowercase__ : Union[str, Any]= FlaubertForTokenClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : int= model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : int= self.num_choices
lowercase__ : str= FlaubertForMultipleChoice(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : Dict= input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : int= token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : str= input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : Any= model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.prepare_config_and_inputs()
(
(
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
),
) : Any= config_and_inputs
lowercase__ : Tuple= {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"lengths": input_lengths,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
__lowerCamelCase = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
__lowerCamelCase = (
{
"feature-extraction": FlaubertModel,
"fill-mask": FlaubertWithLMHeadModel,
"question-answering": FlaubertForQuestionAnsweringSimple,
"text-classification": FlaubertForSequenceClassification,
"token-classification": FlaubertForTokenClassification,
"zero-shot": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast" )
):
            # `QAPipelineTests` fails for a few models when the slower tokenizers are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__=False ):
'''simple docstring'''
lowercase__ : Tuple= super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
lowercase__ : List[Any]= torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case__ )
lowercase__ : List[str]= torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case__ )
return inputs_dict
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= FlaubertModelTester(self )
lowercase__ : List[str]= ConfigTester(self , config_class=snake_case__ , emb_dim=37 )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Any= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[int]= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Tuple= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[str]= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Union[str, Any]= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Tuple= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*snake_case__ )
@slow
def UpperCAmelCase_ ( self ):
'''simple docstring'''
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : List[str]= FlaubertModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
@slow
@require_torch_gpu
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__, lowercase__ : Optional[Any]= self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
lowercase__ : int= True
lowercase__ : List[Any]= model_class(config=snake_case__ )
lowercase__ : str= self._prepare_for_class(snake_case__ , snake_case__ )
lowercase__ : Dict= torch.jit.trace(
snake_case__ , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(snake_case__ , os.path.join(snake_case__ , "traced_model.pt" ) )
lowercase__ : str= torch.jit.load(os.path.join(snake_case__ , "traced_model.pt" ) , map_location=snake_case__ )
loaded(inputs_dict["input_ids"].to(snake_case__ ) , inputs_dict["attention_mask"].to(snake_case__ ) )
@require_torch
class __UpperCAmelCase( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= FlaubertModel.from_pretrained("flaubert/flaubert_base_cased" )
lowercase__ : Tuple= torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
with torch.no_grad():
lowercase__ : Optional[int]= model(snake_case__ )[0]
lowercase__ : Optional[int]= torch.Size((1, 11, 768) )
self.assertEqual(output.shape , snake_case__ )
lowercase__ : Dict= torch.tensor(
[[[-2.62_51, -1.42_98, -0.02_27], [-2.85_10, -1.63_87, 0.22_58], [-2.81_14, -1.18_32, -0.30_66]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case__ , atol=1e-4 ) )
| 85 | 1 |
"""simple docstring"""
a : List[Any] = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
a : List[str] = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
a : Union[str, Any] = {
0: """Sunday""",
1: """Monday""",
2: """Tuesday""",
3: """Wednesday""",
4: """Thursday""",
5: """Friday""",
6: """Saturday""",
}
def lowercase__(A , A , A ) ->str:
"""simple docstring"""
assert len(str(A ) ) > 2, "year should be in YYYY format"
assert 1 <= month <= 12, "month should be between 1 to 12"
assert 1 <= day <= 31, "day should be between 1 to 31"
# Doomsday algorithm:
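    # 1) Anchor day of the century: (5 * (century mod 4) + 2) mod 7.
    # 2) Doomsday of the year: add y//12 + y%12 + (y%12)//4 to the century anchor, where y is the
    #    two-digit year (this is equivalent to y + y//4 modulo 7).
    # 3) Weekday: offset the day of the month from that month's doomsday date and wrap modulo 7.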
lowercase__ : Tuple= year // 100
lowercase__ : Union[str, Any]= (5 * (century % 4) + 2) % 7
lowercase__ : List[str]= year % 100
lowercase__ : Union[str, Any]= centurian % 12
lowercase__ : Dict= (
(centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
) % 7
lowercase__ : Any= (
DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)  # century years are leap only if divisible by 400
else DOOMSDAY_LEAP[month - 1]
)
lowercase__ : List[str]= (dooms_day + day - day_anchor) % 7
return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 85 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = 42
__lowerCamelCase = 42
__lowerCamelCase = None
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = 2
@register_to_config
def __init__( self , snake_case__ = 0.02 , snake_case__ = 100 , snake_case__ = 1.0_07 , snake_case__ = 80 , snake_case__ = 0.05 , snake_case__ = 50 , ):
'''simple docstring'''
# standard deviation of the initial noise distribution
lowercase__ : int= sigma_max
# setable values
lowercase__ : int= None
lowercase__ : np.IntTensor= None
lowercase__ : torch.FloatTensor= None # sigma(t_i)
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
return sample
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
lowercase__ : List[Any]= num_inference_steps
lowercase__ : Any= np.arange(0 , self.num_inference_steps )[::-1].copy()
lowercase__ : Tuple= torch.from_numpy(snake_case__ ).to(snake_case__ )
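        # Build the per-step sigma schedule by geometrically interpolating between sigma_max**2 and
        # sigma_min**2 over the inference steps (Karras VE-style noise schedule).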
lowercase__ : Union[str, Any]= [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in self.timesteps
]
lowercase__ : int= torch.tensor(snake_case__ , dtype=torch.floataa , device=snake_case__ )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ = None ):
'''simple docstring'''
if self.config.s_min <= sigma <= self.config.s_max:
lowercase__ : Optional[Any]= min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
else:
lowercase__ : str= 0
# sample eps ~ N(0, S_noise^2 * I)
lowercase__ : List[Any]= self.config.s_noise * randn_tensor(sample.shape , generator=snake_case__ ).to(sample.device )
lowercase__ : str= sigma + gamma * sigma
lowercase__ : Any= sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = True , ):
'''simple docstring'''
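        # First-order (Euler) update: reconstruct the denoised sample at sigma_hat, form the ODE
        # derivative, and step from sigma_hat to sigma_prev along it.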
lowercase__ : Union[str, Any]= sample_hat + sigma_hat * model_output
lowercase__ : Optional[int]= (sample_hat - pred_original_sample) / sigma_hat
lowercase__ : Optional[Any]= sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=snake_case__ , derivative=snake_case__ , pred_original_sample=snake_case__ )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = True , ):
'''simple docstring'''
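        # Second-order (Heun-style) correction: re-estimate the derivative at sigma_prev and average
        # it with the first derivative before repeating the step from sigma_hat.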
lowercase__ : int= sample_prev + sigma_prev * model_output
lowercase__ : Optional[int]= (sample_prev - pred_original_sample) / sigma_prev
lowercase__ : Optional[Any]= sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=snake_case__ , derivative=snake_case__ , pred_original_sample=snake_case__ )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
raise NotImplementedError()
| 85 | 1 |
"""simple docstring"""
def lowercase__(A , A , A ) ->float:
"""simple docstring"""
return round(float(moles / volume ) * nfactor )
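# The 0.0_821 factor in the helpers below is the ideal gas constant R in L·atm/(mol·K), so each
# helper is a rearrangement of the ideal gas law PV = nRT.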
def lowercase__(A , A , A ) ->float:
"""simple docstring"""
return round(float((moles * 0.0_821 * temperature) / (volume) ) )
def lowercase__(A , A , A ) ->float:
"""simple docstring"""
return round(float((moles * 0.0_821 * temperature) / (pressure) ) )
def lowercase__(A , A , A ) ->float:
"""simple docstring"""
return round(float((pressure * volume) / (0.0_821 * moles) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 85 |
"""simple docstring"""
from ....utils import logging
a : List[str] = logging.get_logger(__name__)
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__=None , snake_case__=2048 ):
'''simple docstring'''
lowercase__ : Dict= config.__dict__
lowercase__ : str= modal_hidden_size
if num_labels:
lowercase__ : List[str]= num_labels
| 85 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a : List[str] = {
"""configuration_pegasus_x""": ["""PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PegasusXConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : int = [
"""PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PegasusXForConditionalGeneration""",
"""PegasusXModel""",
"""PegasusXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
a : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 85 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def lowercase__(A ) ->int:
"""simple docstring"""
lowercase__ : Optional[int]= []
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''',
f'''stage{idx}.patch_embed.proj.weight''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''',
f'''stage{idx}.patch_embed.proj.bias''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''',
f'''stage{idx}.patch_embed.norm.weight''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''',
f'''stage{idx}.patch_embed.norm.bias''',
) )
return embed
def lowercase__(A , A ) ->Any:
"""simple docstring"""
lowercase__ : Any= []
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj.bias''',
) )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', f'''stage{idx}.blocks.{cnt}.norm1.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', f'''stage{idx}.blocks.{cnt}.norm1.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', f'''stage{idx}.blocks.{cnt}.norm2.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', f'''stage{idx}.blocks.{cnt}.norm2.bias''') )
return attention_weights
def lowercase__(A ) ->List[Any]:
"""simple docstring"""
lowercase__ : Dict= []
token.append((f'''cvt.encoder.stages.{idx}.cls_token''', "stage2.cls_token") )
return token
def lowercase__() ->Union[str, Any]:
"""simple docstring"""
lowercase__ : Dict= []
head.append(("layernorm.weight", "norm.weight") )
head.append(("layernorm.bias", "norm.bias") )
head.append(("classifier.weight", "head.weight") )
head.append(("classifier.bias", "head.bias") )
return head
def lowercase__(A , A , A , A ) ->Optional[int]:
"""simple docstring"""
lowercase__ : List[str]= "imagenet-1k-id2label.json"
lowercase__ : List[str]= 1_000
lowercase__ : Tuple= "huggingface/label-files"
lowercase__ : int= num_labels
lowercase__ : int= json.load(open(cached_download(hf_hub_url(A , A , repo_type="dataset" ) ) , "r" ) )
lowercase__ : str= {int(A ): v for k, v in idalabel.items()}
lowercase__ : Optional[int]= idalabel
lowercase__ : Union[str, Any]= {v: k for k, v in idalabel.items()}
lowercase__ : Tuple= CvtConfig(num_labels=A , idalabel=A , labelaid=A )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit("/" , 1 )[-1][4:6] == "13":
lowercase__ : int= [1, 2, 10]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit("/" , 1 )[-1][4:6] == "21":
lowercase__ : Union[str, Any]= [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
else:
lowercase__ : Optional[Any]= [2, 2, 20]
lowercase__ : Optional[Any]= [3, 12, 16]
lowercase__ : List[str]= [192, 768, 1_024]
lowercase__ : List[str]= CvtForImageClassification(A )
lowercase__ : Any= AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" )
lowercase__ : Dict= image_size
lowercase__ : int= torch.load(A , map_location=torch.device("cpu" ) )
lowercase__ : Optional[Any]= OrderedDict()
lowercase__ : Tuple= []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
lowercase__ : Optional[int]= list_of_state_dict + cls_token(A )
lowercase__ : List[str]= list_of_state_dict + embeddings(A )
for cnt in range(config.depth[idx] ):
lowercase__ : Dict= list_of_state_dict + attention(A , A )
lowercase__ : Optional[Any]= list_of_state_dict + final()
for gg in list_of_state_dict:
print(A )
for i in range(len(A ) ):
lowercase__ : str= original_weights[list_of_state_dict[i][1]]
model.load_state_dict(A )
model.save_pretrained(A )
image_processor.save_pretrained(A )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
a : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"""--cvt_model""",
default="""cvt-w24""",
type=str,
help="""Name of the cvt model you'd like to convert.""",
)
parser.add_argument(
"""--image_size""",
default=384,
type=int,
help="""Input Image Size""",
)
parser.add_argument(
"""--cvt_file_name""",
default=r"""cvtmodels\CvT-w24-384x384-IN-22k.pth""",
type=str,
help="""Input Image Size""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
a : Optional[int] = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 85 | 1 |
"""simple docstring"""
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
a : Union[str, Any] = imread(r"""digital_image_processing/image_data/lena_small.jpg""")
a : str = cvtColor(img, COLOR_BGR2GRAY)
def lowercase__() ->List[str]:
"""simple docstring"""
lowercase__ : Union[str, Any]= cn.convert_to_negative(A )
# assert negative_img array for at least one True
assert negative_img.any()
def lowercase__() ->Tuple:
"""simple docstring"""
with Image.open("digital_image_processing/image_data/lena_small.jpg" ) as img:
# Work around assertion for response
assert str(cc.change_contrast(A , 110 ) ).startswith(
"<PIL.Image.Image image mode=RGB size=100x100 at" )
def lowercase__() ->Union[str, Any]:
"""simple docstring"""
lowercase__ : Any= canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def lowercase__() ->str:
"""simple docstring"""
lowercase__ : Any= imread("digital_image_processing/image_data/lena_small.jpg" , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
lowercase__ : str= canny.canny(A )
# assert canny array for at least one True
assert canny_array.any()
def lowercase__() ->Optional[Any]:
"""simple docstring"""
assert gg.gaussian_filter(A , 5 , sigma=0.9 ).all()
def lowercase__() ->Optional[int]:
"""simple docstring"""
lowercase__ : List[str]= array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
lowercase__ : Dict= conv.img_convolve(A , A ).astype(A )
assert res.any()
def lowercase__() ->str:
"""simple docstring"""
assert med.median_filter(A , 3 ).any()
def lowercase__() ->Any:
"""simple docstring"""
lowercase__, lowercase__ : List[Any]= sob.sobel_filter(A )
assert grad.any() and theta.any()
def lowercase__() ->List[Any]:
"""simple docstring"""
lowercase__ : List[Any]= sp.make_sepia(A , 20 )
assert sepia.all()
def lowercase__(A = "digital_image_processing/image_data/lena_small.jpg" ) ->Tuple:
"""simple docstring"""
lowercase__ : str= bs.Burkes(imread(A , 1 ) , 120 )
burkes.process()
assert burkes.output_img.any()
def lowercase__(A = "digital_image_processing/image_data/lena_small.jpg" , ) ->Any:
"""simple docstring"""
lowercase__ : List[str]= rs.NearestNeighbour(imread(A , 1 ) , 400 , 200 )
nn.process()
assert nn.output.any()
def lowercase__() ->str:
"""simple docstring"""
lowercase__ : Optional[int]= "digital_image_processing/image_data/lena.jpg"
# Reading the image and converting it to grayscale.
lowercase__ : Tuple= imread(A , 0 )
# Test for get_neighbors_pixel function() return not None
lowercase__ : Union[str, Any]= 0
lowercase__ : Any= 0
lowercase__ : Dict= image[x_coordinate][y_coordinate]
lowercase__ : Optional[Any]= lbp.get_neighbors_pixel(
A , A , A , A )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
lowercase__ : List[str]= np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
lowercase__ : Optional[int]= lbp.local_binary_value(A , A , A )
assert lbp_image.any()
| 85 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = 42
__lowerCamelCase = 42
def __init__( self , snake_case__ , snake_case__ ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=snake_case__ , scheduler=snake_case__ )
@torch.no_grad()
def __call__( self , snake_case__ = 1 , snake_case__ = 2000 , snake_case__ = None , snake_case__ = "pil" , snake_case__ = True , **snake_case__ , ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.unet.config.sample_size
lowercase__ : Dict= (batch_size, 3, img_size, img_size)
lowercase__ : List[Any]= self.unet
lowercase__ : Tuple= randn_tensor(snake_case__ , generator=snake_case__ ) * self.scheduler.init_noise_sigma
lowercase__ : Tuple= sample.to(self.device )
self.scheduler.set_timesteps(snake_case__ )
self.scheduler.set_sigmas(snake_case__ )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
lowercase__ : Optional[Any]= self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
lowercase__ : List[Any]= self.unet(snake_case__ , snake_case__ ).sample
lowercase__ : List[Any]= self.scheduler.step_correct(snake_case__ , snake_case__ , generator=snake_case__ ).prev_sample
# prediction step
lowercase__ : List[str]= model(snake_case__ , snake_case__ ).sample
lowercase__ : Tuple= self.scheduler.step_pred(snake_case__ , snake_case__ , snake_case__ , generator=snake_case__ )
lowercase__, lowercase__ : Tuple= output.prev_sample, output.prev_sample_mean
lowercase__ : List[str]= sample_mean.clamp(0 , 1 )
lowercase__ : Union[str, Any]= sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowercase__ : str= self.numpy_to_pil(snake_case__ )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=snake_case__ )
| 85 | 1 |
"""simple docstring"""
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
a : Dict = logging.get_logger(__name__)
class __UpperCAmelCase:
"""simple docstring"""
__lowerCamelCase = None
@experimental
def lowercase__(A , A , A , A , A , A , A ) ->Union[str, Any]:
"""simple docstring"""
if ParallelBackendConfig.backend_name is None:
return _map_with_multiprocessing_pool(
A , A , A , A , A , A , A )
return _map_with_joblib(A , A , A , A , A , A , A )
def lowercase__(A , A , A , A , A , A , A ) ->Any:
"""simple docstring"""
lowercase__ : str= num_proc if num_proc <= len(A ) else len(A )
    lowercase__ : str= [] # We organize the splits ourselves (contiguous splits)
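    # Contiguous split: every process gets len(iterable) // num_proc items, and the first
    # len(iterable) % num_proc processes each receive one extra item.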
for index in range(A ):
lowercase__ : Dict= len(A ) // num_proc
lowercase__ : str= len(A ) % num_proc
lowercase__ : Tuple= div * index + min(A , A )
lowercase__ : Any= start + div + (1 if index < mod else 0)
split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc) )
if len(A ) != sum(len(i[1] ) for i in split_kwds ):
raise ValueError(
f'''Error dividing inputs iterable among processes. '''
f'''Total number of objects {len(A )}, '''
f'''length: {sum(len(i[1] ) for i in split_kwds )}''' )
logger.info(
f'''Spawning {num_proc} processes for {len(A )} objects in slices of {[len(i[1] ) for i in split_kwds]}''' )
lowercase__, lowercase__ : List[Any]= None, None
if not disable_tqdm:
lowercase__, lowercase__ : Optional[int]= (RLock(),), tqdm.set_lock
with Pool(A , initargs=A , initializer=A ) as pool:
lowercase__ : Any= pool.map(A , A )
logger.info(f'''Finished {num_proc} processes''' )
lowercase__ : List[str]= [obj for proc_res in mapped for obj in proc_res]
logger.info(f'''Unpacked {len(A )} objects''' )
return mapped
def lowercase__(A , A , A , A , A , A , A ) ->Any:
"""simple docstring"""
import joblib
with joblib.parallel_backend(ParallelBackendConfig.backend_name , n_jobs=A ):
return joblib.Parallel()(
joblib.delayed(A )((function, obj, types, None, True, None) ) for obj in iterable )
@experimental
@contextlib.contextmanager
def lowercase__(A ) ->Union[str, Any]:
"""simple docstring"""
lowercase__ : Optional[Any]= backend_name
if backend_name == "spark":
from joblibspark import register_spark
register_spark()
# TODO: call create_cache_and_write_probe if "download" in steps
# TODO: raise NotImplementedError when Dataset.map etc is called
try:
yield
finally:
lowercase__ : Optional[int]= None
| 85 |
"""simple docstring"""
def lowercase__(A ) ->list[int]:
"""simple docstring"""
lowercase__ : List[str]= len(A )
for i in range(A ):
for j in range(i + 1 , A ):
if numbers[j] < numbers[i]:
lowercase__, lowercase__ : List[str]= numbers[j], numbers[i]
return numbers
if __name__ == "__main__":
a : Dict = input("""Enter numbers separated by a comma:\n""").strip()
a : List[str] = [int(item) for item in user_input.split(""",""")]
print(exchange_sort(unsorted))
| 85 | 1 |
"""simple docstring"""
def lowercase__(A ) ->int:
"""simple docstring"""
assert column_title.isupper()
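    # Interpret the title as a base-26 number with digits A=1 ... Z=26, scanning from the rightmost
    # character (ord(char) - 64 maps 'A'..'Z' to 1..26).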
lowercase__ : List[Any]= 0
lowercase__ : Union[str, Any]= len(A ) - 1
lowercase__ : str= 0
while index >= 0:
lowercase__ : Any= (ord(column_title[index] ) - 64) * pow(26 , A )
answer += value
power += 1
index -= 1
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
| 85 |
"""simple docstring"""
import math
from collections.abc import Iterator
from itertools import takewhile
def lowercase__(A ) ->bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(A ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def lowercase__() ->Iterator[int]:
"""simple docstring"""
lowercase__ : Union[str, Any]= 2
while True:
if is_prime(A ):
yield num
num += 1
def lowercase__(A = 2_000_000 ) ->int:
"""simple docstring"""
return sum(takewhile(lambda A : x < n , prime_generator() ) )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 85 | 1 |
"""simple docstring"""
from __future__ import annotations
def lowercase__(A ) ->float:
"""simple docstring"""
if not nums:
raise ValueError("List is empty" )
return sum(A ) / len(A )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 85 |
"""simple docstring"""
def lowercase__(A ) ->bool:
"""simple docstring"""
lowercase__ : Tuple= (1 + 24 * n) ** 0.5
return ((1 + root) / 6) % 1 == 0
def lowercase__(A = 5_000 ) ->int:
"""simple docstring"""
lowercase__ : str= [(i * (3 * i - 1)) // 2 for i in range(1 , A )]
for i, pentagonal_i in enumerate(A ):
for j in range(A , len(A ) ):
lowercase__ : List[Any]= pentagonal_nums[j]
lowercase__ : int= pentagonal_i + pentagonal_j
lowercase__ : Optional[int]= pentagonal_j - pentagonal_i
if is_pentagonal(A ) and is_pentagonal(A ):
return b
return -1
if __name__ == "__main__":
print(F"""{solution() = }""")
| 85 | 1 |
"""simple docstring"""
#
# This is a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def lowercase__(*A ) ->Any:
"""simple docstring"""
with open(A , "r" ) as fh:
fcntl.flock(A , fcntl.LOCK_EX )
try:
print(*A )
finally:
fcntl.flock(A , fcntl.LOCK_UN )
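# The exclusive flock above serializes prints from concurrent ranks, so output lines
# from different processes do not interleave in the shared log.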
a : Tuple = int(os.environ["""LOCAL_RANK"""])
torch.cuda.set_device(local_rank)
a : Dict = torch.device("""cuda""", local_rank)
a : Dict = socket.gethostname()
a : Tuple = F"""[{hostname}-{local_rank}]"""
try:
# test distributed
dist.init_process_group("""nccl""")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
a : Tuple = dist.get_rank()
a : str = dist.get_world_size()
printflock(F"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(F"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(F"""{gpu} is broken""")
raise
| 85 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : List[str] = logging.get_logger(__name__)
a : Union[str, Any] = {
"""google/pix2struct-textcaps-base""": (
"""https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"""
),
}
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = "pix2struct_text_model"
__lowerCamelCase = ["past_key_values"]
__lowerCamelCase = {
"hidden_size": "hidden_size",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , snake_case__=50244 , snake_case__=768 , snake_case__=64 , snake_case__=2048 , snake_case__=12 , snake_case__=12 , snake_case__=32 , snake_case__=128 , snake_case__=0.1 , snake_case__=1e-6 , snake_case__=1.0 , snake_case__="gelu_new" , snake_case__=0 , snake_case__=False , snake_case__=0 , snake_case__=1 , snake_case__=False , snake_case__=True , **snake_case__ , ):
'''simple docstring'''
lowercase__ : int= vocab_size
lowercase__ : Optional[Any]= hidden_size
lowercase__ : Tuple= d_kv
lowercase__ : Optional[int]= d_ff
lowercase__ : Any= num_layers
lowercase__ : Dict= num_heads
lowercase__ : List[Any]= relative_attention_num_buckets
lowercase__ : Optional[Any]= relative_attention_max_distance
lowercase__ : Dict= dropout_rate
lowercase__ : Tuple= layer_norm_epsilon
lowercase__ : str= initializer_factor
lowercase__ : Any= use_cache
lowercase__ : Optional[int]= eos_token_id
lowercase__ : str= decoder_start_token_id
# for backwards compatibility
lowercase__ : Optional[Any]= dense_act_fn
super().__init__(
pad_token_id=snake_case__ , eos_token_id=snake_case__ , decoder_start_token_id=snake_case__ , tie_word_embeddings=snake_case__ , is_decoder=snake_case__ , **snake_case__ , )
@classmethod
def UpperCAmelCase_ ( cls , snake_case__ , **snake_case__ ):
'''simple docstring'''
cls._set_token_in_kwargs(snake_case__ )
lowercase__, lowercase__ : str= cls.get_config_dict(snake_case__ , **snake_case__ )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get("model_type" ) == "pix2struct":
lowercase__ : str= config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(snake_case__ , **snake_case__ )
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = "pix2struct_vision_model"
def __init__( self , snake_case__=768 , snake_case__=768 , snake_case__=2048 , snake_case__=64 , snake_case__=12 , snake_case__=12 , snake_case__="gelu_new" , snake_case__=1e-6 , snake_case__=0.0 , snake_case__=0.0 , snake_case__=1e-10 , snake_case__=1.0 , snake_case__=4096 , snake_case__=32 , snake_case__=128 , **snake_case__ , ):
'''simple docstring'''
super().__init__(**snake_case__ )
lowercase__ : Tuple= hidden_size
lowercase__ : Tuple= patch_embed_hidden_size
lowercase__ : Optional[Any]= d_ff
lowercase__ : Dict= dropout_rate
lowercase__ : Any= num_hidden_layers
lowercase__ : Optional[int]= num_attention_heads
lowercase__ : Dict= initializer_range
lowercase__ : Tuple= initializer_factor
lowercase__ : Tuple= attention_dropout
lowercase__ : Optional[Any]= layer_norm_eps
lowercase__ : List[Any]= dense_act_fn
lowercase__ : str= seq_len
lowercase__ : List[str]= relative_attention_num_buckets
lowercase__ : Union[str, Any]= relative_attention_max_distance
lowercase__ : Dict= d_kv
@classmethod
def UpperCAmelCase_ ( cls , snake_case__ , **snake_case__ ):
'''simple docstring'''
cls._set_token_in_kwargs(snake_case__ )
lowercase__, lowercase__ : int= cls.get_config_dict(snake_case__ , **snake_case__ )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get("model_type" ) == "pix2struct":
lowercase__ : Union[str, Any]= config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(snake_case__ , **snake_case__ )
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = "pix2struct"
__lowerCamelCase = True
def __init__( self , snake_case__=None , snake_case__=None , snake_case__=1.0 , snake_case__=0.02 , snake_case__=False , snake_case__=False , snake_case__=True , **snake_case__ , ):
'''simple docstring'''
super().__init__(tie_word_embeddings=snake_case__ , is_encoder_decoder=snake_case__ , **snake_case__ )
if text_config is None:
lowercase__ : List[Any]= {}
logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values." )
if vision_config is None:
lowercase__ : str= {}
logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values." )
lowercase__ : str= PixaStructTextConfig(**snake_case__ )
lowercase__ : Dict= PixaStructVisionConfig(**snake_case__ )
lowercase__ : int= self.text_config.decoder_start_token_id
lowercase__ : List[Any]= self.text_config.pad_token_id
lowercase__ : Any= self.text_config.eos_token_id
lowercase__ : Any= initializer_factor
lowercase__ : int= initializer_range
lowercase__ : List[str]= self.initializer_range
lowercase__ : List[str]= self.initializer_range
lowercase__ : Dict= is_vqa
@classmethod
def UpperCAmelCase_ ( cls , snake_case__ , snake_case__ , **snake_case__ ):
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Union[str, Any]= copy.deepcopy(self.__dict__ )
lowercase__ : str= self.text_config.to_dict()
lowercase__ : str= self.vision_config.to_dict()
lowercase__ : List[str]= self.__class__.model_type
return output
| 85 | 1 |
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
a : List[Any] = logging.get_logger(__name__)
@add_end_docstrings(SCREAMING_SNAKE_CASE__ )
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , **snake_case__ ):
'''simple docstring'''
super().__init__(**snake_case__ )
if self.framework == "tf":
raise ValueError(F'''The {self.__class__} is only available in PyTorch.''' )
requires_backends(self , "vision" )
self.check_model_type(snake_case__ )
def __call__( self , snake_case__ , snake_case__ = None , **snake_case__ , ):
'''simple docstring'''
if "text_queries" in kwargs:
lowercase__ : List[str]= kwargs.pop("text_queries" )
if isinstance(snake_case__ , (str, Image.Image) ):
lowercase__ : Tuple= {"image": image, "candidate_labels": candidate_labels}
else:
lowercase__ : Dict= image
lowercase__ : Any= super().__call__(snake_case__ , **snake_case__ )
return results
def UpperCAmelCase_ ( self , **snake_case__ ):
'''simple docstring'''
lowercase__ : Optional[Any]= {}
if "threshold" in kwargs:
lowercase__ : int= kwargs["threshold"]
if "top_k" in kwargs:
lowercase__ : List[str]= kwargs["top_k"]
return {}, {}, postprocess_params
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
lowercase__ : Tuple= load_image(inputs["image"] )
lowercase__ : List[str]= inputs["candidate_labels"]
if isinstance(snake_case__ , snake_case__ ):
lowercase__ : Union[str, Any]= candidate_labels.split("," )
lowercase__ : List[Any]= torch.tensor([[image.height, image.width]] , dtype=torch.intaa )
for i, candidate_label in enumerate(snake_case__ ):
lowercase__ : List[str]= self.tokenizer(snake_case__ , return_tensors=self.framework )
lowercase__ : Union[str, Any]= self.image_processor(snake_case__ , return_tensors=self.framework )
yield {
"is_last": i == len(snake_case__ ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
lowercase__ : List[str]= model_inputs.pop("target_size" )
lowercase__ : Tuple= model_inputs.pop("candidate_label" )
lowercase__ : int= model_inputs.pop("is_last" )
lowercase__ : Union[str, Any]= self.model(**snake_case__ )
lowercase__ : Union[str, Any]= {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
return model_outputs
def UpperCAmelCase_ ( self , snake_case__ , snake_case__=0.1 , snake_case__=None ):
'''simple docstring'''
lowercase__ : Optional[Any]= []
for model_output in model_outputs:
lowercase__ : Optional[int]= model_output["candidate_label"]
lowercase__ : List[str]= BaseModelOutput(snake_case__ )
lowercase__ : str= self.image_processor.post_process_object_detection(
outputs=snake_case__ , threshold=snake_case__ , target_sizes=model_output["target_size"] )[0]
for index in outputs["scores"].nonzero():
lowercase__ : Dict= outputs["scores"][index].item()
lowercase__ : List[Any]= self._get_bounding_box(outputs["boxes"][index][0] )
lowercase__ : List[Any]= {"score": score, "label": label, "box": box}
results.append(snake_case__ )
lowercase__ : Dict= sorted(snake_case__ , key=lambda snake_case__ : x["score"] , reverse=snake_case__ )
if top_k:
lowercase__ : List[str]= results[:top_k]
return results
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
if self.framework != "pt":
raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch." )
lowercase__, lowercase__, lowercase__, lowercase__ : List[str]= box.int().tolist()
lowercase__ : Any= {
"xmin": xmin,
"ymin": ymin,
"xmax": xmax,
"ymax": ymax,
}
return bbox
| 85 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class __UpperCAmelCase( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : str= TFAutoModelForSeqaSeqLM.from_pretrained("google/mt5-small" )
lowercase__ : str= AutoTokenizer.from_pretrained("google/mt5-small" )
lowercase__ : Tuple= tokenizer("Hello there" , return_tensors="tf" ).input_ids
lowercase__ : Optional[Any]= tokenizer("Hi I am" , return_tensors="tf" ).input_ids
lowercase__ : Optional[Any]= model(snake_case__ , labels=snake_case__ ).loss
lowercase__ : int= -tf.math.reduce_mean(snake_case__ ).numpy()
lowercase__ : int= -21.22_81_68
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2e-4 )
| 85 | 1 |
"""simple docstring"""
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
a : Tuple = logging.get_logger(__name__)
a : Any = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
or scores for each vocabulary token after SoftMax.
kwargs (`Dict[str, Any]`, *optional*):
Additional stopping criteria specific kwargs.
Return:
`bool`. `False` indicates we should continue, `True` indicates we should stop.
"""
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
@add_start_docstrings(snake_case__ )
def __call__( self , snake_case__ , snake_case__ , **snake_case__ ):
'''simple docstring'''
raise NotImplementedError("StoppingCriteria needs to be subclassed" )
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
lowercase__ : List[str]= max_length
lowercase__ : Optional[int]= max_position_embeddings
@add_start_docstrings(snake_case__ )
def __call__( self , snake_case__ , snake_case__ , **snake_case__ ):
'''simple docstring'''
lowercase__ : Optional[int]= input_ids.shape[-1]
lowercase__ : List[Any]= cur_len >= self.max_length
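        # Note: cur_len is the total sequence length so far (prompt + generated tokens),
        # so max_length bounds the whole sequence, not just the newly generated part.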
if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
logger.warning_once(
"This is a friendly reminder - the current text generation call will exceed the model's predefined "
F'''maximum length ({self.max_position_embeddings}). Depending on the model, you may observe '''
"exceptions, performance degradation, or nothing at all." )
return is_done
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__ ):
'''simple docstring'''
warnings.warn(
"The class `MaxNewTokensCriteria` is deprecated. "
F'''Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` '''
"with `max_length = start_length + max_new_tokens` instead." , snake_case__ , )
lowercase__ : Tuple= start_length
lowercase__ : Any= max_new_tokens
lowercase__ : Any= start_length + max_new_tokens
@add_start_docstrings(snake_case__ )
def __call__( self , snake_case__ , snake_case__ , **snake_case__ ):
'''simple docstring'''
return input_ids.shape[-1] >= self.max_length
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
lowercase__ : Dict= max_time
lowercase__ : List[Any]= time.time() if initial_timestamp is None else initial_timestamp
@add_start_docstrings(snake_case__ )
def __call__( self , snake_case__ , snake_case__ , **snake_case__ ):
'''simple docstring'''
return time.time() - self.initial_timestamp > self.max_time
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
@add_start_docstrings(snake_case__ )
def __call__( self , snake_case__ , snake_case__ , **snake_case__ ):
'''simple docstring'''
return any(criteria(snake_case__ , snake_case__ ) for criteria in self )
@property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
for stopping_criterium in self:
if isinstance(snake_case__ , snake_case__ ):
return stopping_criterium.max_length
elif isinstance(snake_case__ , snake_case__ ):
return stopping_criterium.max_length
return None
def lowercase__(A , A ) ->StoppingCriteriaList:
"""simple docstring"""
lowercase__ : Any= stopping_criteria.max_length
lowercase__ : str= deepcopy(A )
if stopping_max_length is not None and stopping_max_length != max_length:
warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter" , A )
elif stopping_max_length is None:
new_stopping_criteria.append(MaxLengthCriteria(max_length=A ) )
return new_stopping_criteria
| 85 |
"""simple docstring"""
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = ["image_processor", "tokenizer"]
__lowerCamelCase = "BridgeTowerImageProcessor"
__lowerCamelCase = ("RobertaTokenizer", "RobertaTokenizerFast")
def __init__( self , snake_case__ , snake_case__ ):
'''simple docstring'''
super().__init__(snake_case__ , snake_case__ )
def __call__( self , snake_case__ , snake_case__ = None , snake_case__ = True , snake_case__ = False , snake_case__ = None , snake_case__ = None , snake_case__ = 0 , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = False , snake_case__ = False , snake_case__ = False , snake_case__ = False , snake_case__ = True , snake_case__ = None , **snake_case__ , ):
'''simple docstring'''
lowercase__ : Optional[int]= self.tokenizer(
text=snake_case__ , add_special_tokens=snake_case__ , padding=snake_case__ , truncation=snake_case__ , max_length=snake_case__ , stride=snake_case__ , pad_to_multiple_of=snake_case__ , return_token_type_ids=snake_case__ , return_attention_mask=snake_case__ , return_overflowing_tokens=snake_case__ , return_special_tokens_mask=snake_case__ , return_offsets_mapping=snake_case__ , return_length=snake_case__ , verbose=snake_case__ , return_tensors=snake_case__ , **snake_case__ , )
# add pixel_values + pixel_mask
lowercase__ : Optional[int]= self.image_processor(
snake_case__ , return_tensors=snake_case__ , do_normalize=snake_case__ , do_center_crop=snake_case__ , **snake_case__ )
encoding.update(snake_case__ )
return encoding
def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ )
def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
return self.tokenizer.decode(*snake_case__ , **snake_case__ )
@property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.tokenizer.model_input_names
lowercase__ : List[Any]= self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 85 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
a : Any = logging.get_logger(__name__)
def lowercase__(A , A=False ) ->Dict:
"""simple docstring"""
lowercase__ : Union[str, Any]= []
# fmt: off
# stem:
rename_keys.append(("cls_token", "vit.embeddings.cls_token") )
rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings") )
rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias") )
# backbone
rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowercase__ : Any= [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
# fmt: on
return rename_keys
def lowercase__(A , A , A=False ) ->List[str]:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
lowercase__ : Any= ""
else:
lowercase__ : Dict= "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowercase__ : int= state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
lowercase__ : Optional[Any]= state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
lowercase__ : Any= in_proj_weight[
: config.hidden_size, :
]
lowercase__ : int= in_proj_bias[: config.hidden_size]
lowercase__ : List[Any]= in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase__ : Dict= in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowercase__ : Union[str, Any]= in_proj_weight[
-config.hidden_size :, :
]
lowercase__ : Union[str, Any]= in_proj_bias[-config.hidden_size :]
def lowercase__(A ) ->str:
"""simple docstring"""
lowercase__ : str= ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(A , A )
def lowercase__(A , A , A ) ->List[str]:
"""simple docstring"""
lowercase__ : int= dct.pop(A )
lowercase__ : Any= val
def lowercase__() ->Optional[int]:
"""simple docstring"""
lowercase__ : Any= "http://images.cocodataset.org/val2017/000000039769.jpg"
lowercase__ : int= Image.open(requests.get(A , stream=A ).raw )
return im
@torch.no_grad()
def lowercase__(A , A , A=False ) ->Union[str, Any]:
"""simple docstring"""
lowercase__ : str= BitConfig(
global_padding="same" , layer_type="bottleneck" , depths=(3, 4, 9) , out_features=["stage3"] , embedding_dynamic_padding=A , )
lowercase__ : Optional[int]= ViTHybridConfig(backbone_config=A , image_size=384 , num_labels=1_000 )
lowercase__ : List[Any]= False
# load original model from timm
lowercase__ : Union[str, Any]= timm.create_model(A , pretrained=A )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
lowercase__ : Dict= timm_model.state_dict()
if base_model:
remove_classification_head_(A )
lowercase__ : Any= create_rename_keys(A , A )
for src, dest in rename_keys:
rename_key(A , A , A )
read_in_q_k_v(A , A , A )
lowercase__ : Optional[int]= "huggingface/label-files"
lowercase__ : Tuple= "imagenet-1k-id2label.json"
lowercase__ : str= json.load(open(hf_hub_download(A , A , repo_type="dataset" ) , "r" ) )
lowercase__ : str= {int(A ): v for k, v in idalabel.items()}
lowercase__ : List[Any]= idalabel
lowercase__ : Optional[Any]= {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
lowercase__ : Dict= ViTHybridModel(A ).eval()
else:
lowercase__ : List[Any]= ViTHybridForImageClassification(A ).eval()
model.load_state_dict(A )
# create image processor
lowercase__ : List[str]= create_transform(**resolve_data_config({} , model=A ) )
lowercase__ : Dict= transform.transforms
lowercase__ : List[str]= {
"bilinear": PILImageResampling.BILINEAR,
"bicubic": PILImageResampling.BICUBIC,
"nearest": PILImageResampling.NEAREST,
}
lowercase__ : Union[str, Any]= ViTHybridImageProcessor(
do_resize=A , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=A , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=A , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
lowercase__ : List[Any]= prepare_img()
lowercase__ : Optional[Any]= transform(A ).unsqueeze(0 )
lowercase__ : List[Any]= processor(A , return_tensors="pt" ).pixel_values
# verify pixel values
assert torch.allclose(A , A )
# verify logits
with torch.no_grad():
lowercase__ : int= model(A )
lowercase__ : Tuple= outputs.logits
print("Predicted class:" , logits.argmax(-1 ).item() )
if base_model:
lowercase__ : List[str]= timm_model.forward_features(A )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(A , outputs.pooler_output , atol=1e-3 )
else:
lowercase__ : int= timm_model(A )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(A , outputs.logits , atol=1e-3 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
Path(A ).mkdir(exist_ok=A )
print(f'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(A )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(A )
if push_to_hub:
print(f'''Pushing model and processor to the hub {vit_name}''' )
model.push_to_hub(f'''ybelkada/{vit_name}''' )
processor.push_to_hub(f'''ybelkada/{vit_name}''' )
if __name__ == "__main__":
a : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_r50_s16_384""",
type=str,
help="""Name of the hybrid ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
a : List[Any] = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 85 |
"""simple docstring"""
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= tempfile.mkdtemp()
lowercase__ : Optional[Any]= 8
# DPR tok
lowercase__ : Tuple= [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
lowercase__ : Any= os.path.join(self.tmpdirname , "dpr_tokenizer" )
os.makedirs(snake_case__ , exist_ok=snake_case__ )
lowercase__ : Any= os.path.join(snake_case__ , DPR_VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
# BART tok
lowercase__ : List[Any]= [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
lowercase__ : Tuple= dict(zip(snake_case__ , range(len(snake_case__ ) ) ) )
lowercase__ : Any= ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
lowercase__ : Tuple= {"unk_token": "<unk>"}
lowercase__ : int= os.path.join(self.tmpdirname , "bart_tokenizer" )
os.makedirs(snake_case__ , exist_ok=snake_case__ )
lowercase__ : List[str]= os.path.join(snake_case__ , BART_VOCAB_FILES_NAMES["vocab_file"] )
lowercase__ : str= os.path.join(snake_case__ , BART_VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(snake_case__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(snake_case__ ) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , "bart_tokenizer" ) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= Dataset.from_dict(
{
"id": ["0", "1"],
"text": ["foo", "bar"],
"title": ["Foo", "Bar"],
"embeddings": [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Union[str, Any]= self.get_dummy_dataset()
lowercase__ : Optional[Any]= RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset:
lowercase__ : Tuple= dataset
lowercase__ : Optional[int]= RagRetriever(
snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
lowercase__ : Dict= self.get_dummy_dataset()
lowercase__ : Tuple= RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="custom" , )
if from_disk:
lowercase__ : Tuple= os.path.join(self.tmpdirname , "dataset" )
lowercase__ : Optional[Any]= os.path.join(self.tmpdirname , "index.faiss" )
dataset.get_index("embeddings" ).save(os.path.join(self.tmpdirname , "index.faiss" ) )
dataset.drop_index("embeddings" )
dataset.save_to_disk(os.path.join(self.tmpdirname , "dataset" ) )
del dataset
lowercase__ : List[Any]= RagRetriever(
snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
lowercase__ : Optional[int]= RagRetriever(
snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , snake_case__ ) , )
return retriever
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[str]= Dataset.from_dict(
{
"id": ["0", "1"],
"text": ["foo", "bar"],
"title": ["Foo", "Bar"],
"embeddings": [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT )
lowercase__ : Optional[int]= os.path.join(self.tmpdirname , "hf_bert_base.hnswSQ8_correct_phi_128.c_index" )
dataset.save_faiss_index("embeddings" , index_file_name + ".index.dpr" )
pickle.dump(dataset["id"] , open(index_file_name + ".index_meta.dpr" , "wb" ) )
lowercase__ : int= os.path.join(self.tmpdirname , "psgs_w100.tsv.pkl" )
lowercase__ : str= {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
pickle.dump(snake_case__ , open(snake_case__ , "wb" ) )
lowercase__ : List[Any]= RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="legacy" , index_path=self.tmpdirname , )
lowercase__ : Optional[Any]= RagRetriever(
snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= 1
lowercase__ : Optional[Any]= self.get_dummy_canonical_hf_index_retriever()
lowercase__ : Union[str, Any]= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__, lowercase__, lowercase__ : Optional[int]= retriever.retrieve(snake_case__ , n_docs=snake_case__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(snake_case__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , snake_case__ )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset:
lowercase__ : Tuple= self.get_dummy_dataset()
retriever.save_pretrained(snake_case__ )
lowercase__ : int= RagRetriever.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
lowercase__ : Any= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : Tuple= retriever.retrieve(snake_case__ , n_docs=1 )
self.assertTrue(out is not None )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[Any]= 1
lowercase__ : Any= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ )
lowercase__ : Union[str, Any]= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__, lowercase__, lowercase__ : Any= retriever.retrieve(snake_case__ , n_docs=snake_case__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(snake_case__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , snake_case__ )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(snake_case__ )
lowercase__ : int= RagRetriever.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
lowercase__ : Tuple= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : str= retriever.retrieve(snake_case__ , n_docs=1 )
self.assertTrue(out is not None )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Any= 1
lowercase__ : str= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ )
lowercase__ : List[str]= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__, lowercase__, lowercase__ : Optional[int]= retriever.retrieve(snake_case__ , n_docs=snake_case__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(snake_case__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , snake_case__ )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Tuple= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(snake_case__ )
lowercase__ : Optional[Any]= RagRetriever.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
lowercase__ : int= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : Union[str, Any]= retriever.retrieve(snake_case__ , n_docs=1 )
self.assertTrue(out is not None )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= 1
lowercase__ : int= self.get_dummy_legacy_index_retriever()
lowercase__ : Optional[Any]= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__, lowercase__, lowercase__ : Optional[Any]= retriever.retrieve(snake_case__ , n_docs=snake_case__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(snake_case__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["text", "title"] )
self.assertEqual(len(doc_dicts[0]["text"] ) , snake_case__ )
self.assertEqual(doc_dicts[0]["text"][0] , "bar" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["text"][0] , "foo" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[int]= self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(snake_case__ )
lowercase__ : List[Any]= RagRetriever.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
lowercase__ : str= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : Tuple= retriever.retrieve(snake_case__ , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def UpperCAmelCase_ ( self ):
'''simple docstring'''
import torch
lowercase__ : str= 1
lowercase__ : Union[str, Any]= self.get_dummy_canonical_hf_index_retriever()
lowercase__ : str= [[5, 7], [10, 11]]
lowercase__ : List[str]= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : Dict= retriever(snake_case__ , snake_case__ , prefix=retriever.config.generator.prefix , n_docs=snake_case__ )
lowercase__, lowercase__, lowercase__ : Optional[int]= (
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(snake_case__ , snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
self.assertIsInstance(snake_case__ , np.ndarray )
lowercase__ : Any= retriever(
snake_case__ , snake_case__ , prefix=retriever.config.generator.prefix , n_docs=snake_case__ , return_tensors="pt" , )
lowercase__, lowercase__, lowercase__, lowercase__ : Tuple= ( # noqa: F841
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
out["doc_ids"],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(snake_case__ , torch.Tensor )
self.assertIsInstance(snake_case__ , torch.Tensor )
self.assertIsInstance(snake_case__ , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[str]= self.get_dpr_ctx_encoder_tokenizer()
lowercase__ : Dict= 1
lowercase__ : Any= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ )
retriever.set_ctx_encoder_tokenizer(snake_case__ )
lowercase__ : List[str]= [[5, 7], [10, 11]]
lowercase__ : Any= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : List[Any]= retriever(snake_case__ , snake_case__ , prefix=retriever.config.generator.prefix , n_docs=snake_case__ )
self.assertEqual(
len(snake_case__ ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask") ) , snake_case__ ) # check for doc token related keys in dictionary.
| 85 | 1 |
"""simple docstring"""
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
a : List[Any] = get_tests_dir("""fixtures/test_sentencepiece.model""")
a : int = get_tests_dir("""fixtures/test_sentencepiece_bpe.model""")
a : str = """pt""" if is_torch_available() else """tf"""
@require_sentencepiece
@require_tokenizers
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
__lowerCamelCase = CamembertTokenizer
__lowerCamelCase = CamembertTokenizerFast
__lowerCamelCase = True
__lowerCamelCase = True
def UpperCAmelCase_ ( self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
lowercase__ : Union[str, Any]= CamembertTokenizer(snake_case__ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : str= "<pad>"
lowercase__ : List[Any]= 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Tuple= list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>NOTUSED" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "<mask>" )
self.assertEqual(len(snake_case__ ) , 1004 )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1005 )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= CamembertTokenizer(snake_case__ )
tokenizer.save_pretrained(self.tmpdirname )
lowercase__ : Tuple= CamembertTokenizerFast.from_pretrained(self.tmpdirname )
lowercase__ : Union[str, Any]= "I was born in 92000, and this is falsé."
lowercase__ : List[Any]= tokenizer.encode(snake_case__ )
lowercase__ : List[Any]= rust_tokenizer.encode(snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
lowercase__ : List[Any]= tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ )
lowercase__ : Union[str, Any]= rust_tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
# <unk> tokens are not the same for `rust` than for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
lowercase__ : Union[str, Any]= tokenizer.convert_ids_to_tokens(snake_case__ )
lowercase__ : List[str]= rust_tokenizer.tokenize(snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
lowercase__ : str= self.get_tokenizer()
lowercase__ : List[str]= self.get_rust_tokenizer()
lowercase__ : Any= "I was born in 92000, and this is falsé."
lowercase__ : str= tokenizer.tokenize(snake_case__ )
lowercase__ : Union[str, Any]= rust_tokenizer.tokenize(snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
lowercase__ : Dict= tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ )
lowercase__ : Dict= rust_tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
lowercase__ : str= self.get_rust_tokenizer()
lowercase__ : List[Any]= tokenizer.encode(snake_case__ )
lowercase__ : str= rust_tokenizer.encode(snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
@slow
def UpperCAmelCase_ ( self ):
'''simple docstring'''
# fmt: off
lowercase__ : int= {"input_ids": [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 27575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 22804, 18818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 10326, 24, 2267, 20, 416, 5072, 15612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        # CamemBERT is a French model, so we also use French texts.
lowercase__ : Dict= [
"Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
"utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
"À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
"pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
"telles que la traduction et la synthèse de texte.",
]
self.tokenizer_integration_test_util(
expected_encoding=snake_case__ , model_name="camembert-base" , revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf" , sequences=snake_case__ , )
| 85 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = ["image_processor", "tokenizer"]
__lowerCamelCase = "AutoImageProcessor"
__lowerCamelCase = "AutoTokenizer"
def __init__( self , snake_case__ , snake_case__ ):
'''simple docstring'''
super().__init__(snake_case__ , snake_case__ )
lowercase__ : List[Any]= self.image_processor
def __call__( self , snake_case__=None , snake_case__=None , snake_case__=None , **snake_case__ ):
'''simple docstring'''
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
lowercase__ : Tuple= self.tokenizer(snake_case__ , return_tensors=snake_case__ , **snake_case__ )
if images is not None:
lowercase__ : str= self.image_processor(snake_case__ , return_tensors=snake_case__ , **snake_case__ )
if text is not None and images is not None:
lowercase__ : Any= image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**snake_case__ ) , tensor_type=snake_case__ )
def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ )
def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
return self.tokenizer.decode(*snake_case__ , **snake_case__ )
@property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return ["input_ids", "attention_mask", "pixel_values"]
| 85 | 1 |
"""simple docstring"""
from typing import Any
import numpy as np
def lowercase__(A ) ->bool:
"""simple docstring"""
return np.array_equal(A , matrix.conjugate().T )
def lowercase__(A , A ) ->Any:
"""simple docstring"""
lowercase__ : Union[str, Any]= v.conjugate().T
lowercase__ : List[Any]= v_star.dot(A )
assert isinstance(A , np.ndarray )
return (v_star_dot.dot(A )) / (v_star.dot(A ))
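# For a Hermitian matrix M, the Rayleigh quotient (v* M v) / (v* v) is real and lies
# between the smallest and largest eigenvalues of M.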
def lowercase__() ->None:
"""simple docstring"""
lowercase__ : Optional[int]= np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
lowercase__ : Optional[int]= np.array([[1], [2], [3]] )
assert is_hermitian(A ), f'''{a} is not hermitian.'''
print(rayleigh_quotient(A , A ) )
lowercase__ : List[Any]= np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
assert is_hermitian(A ), f'''{a} is not hermitian.'''
assert rayleigh_quotient(A , A ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 85 |
"""simple docstring"""
a : List[Any] = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"""
def lowercase__(A ) ->bytes:
"""simple docstring"""
if not isinstance(A , A ):
lowercase__ : Union[str, Any]= f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(A )
lowercase__ : str= "".join(bin(A )[2:].zfill(8 ) for byte in data )
lowercase__ : Tuple= len(A ) % 6 != 0
if padding_needed:
# The padding that will be added later
lowercase__ : Union[str, Any]= b"=" * ((6 - len(A ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(A ) % 6)
else:
lowercase__ : str= b""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(A ) , 6 ) ).encode()
+ padding
)
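# Example (standard Base64): b"data" has bit stream 01100100 01100001 01110100 01100001,
# which regroups into 6-bit chunks Z, G, F, 0, Y, Q plus "==" padding -> b"ZGF0YQ==".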
def lowercase__(A ) ->bytes:
"""simple docstring"""
if not isinstance(A , A ) and not isinstance(A , A ):
lowercase__ : str= (
"argument should be a bytes-like object or ASCII string, "
f'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(A )
    # If encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters, then convert it to a string object
if isinstance(A , A ):
try:
lowercase__ : Optional[Any]= encoded_data.decode("utf-8" )
except UnicodeDecodeError:
raise ValueError("base64 encoded data should only contain ASCII characters" )
lowercase__ : List[Any]= encoded_data.count("=" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(A ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
lowercase__ : str= encoded_data[:-padding]
lowercase__ : Tuple= "".join(
bin(B64_CHARSET.index(A ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
lowercase__ : Tuple= "".join(
bin(B64_CHARSET.index(A ) )[2:].zfill(6 ) for char in encoded_data )
lowercase__ : Any= [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(A ) , 8 )
]
return bytes(A )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 85 | 1 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class __UpperCAmelCase:
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__ , snake_case__ = 0 ):
'''simple docstring'''
lowercase__, lowercase__ : Dict= row, column
lowercase__ : Dict= [[default_value for c in range(snake_case__ )] for r in range(snake_case__ )]
def __str__( self ):
'''simple docstring'''
lowercase__ : List[str]= F'''Matrix consist of {self.row} rows and {self.column} columns\n'''
# Make string identifier
lowercase__ : str= 0
for row_vector in self.array:
for obj in row_vector:
lowercase__ : Tuple= max(snake_case__ , len(str(snake_case__ ) ) )
lowercase__ : int= F'''%{max_element_length}s'''
# Make string and return
def single_line(snake_case__ ) -> str:
nonlocal string_format_identifier
lowercase__ : Tuple= "["
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
line += "]"
return line
s += "\n".join(single_line(snake_case__ ) for row_vector in self.array )
return s
def __repr__( self ):
'''simple docstring'''
return str(self )
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
if not (isinstance(snake_case__ , (list, tuple) ) and len(snake_case__ ) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self , snake_case__ ):
'''simple docstring'''
assert self.validate_indicies(snake_case__ )
return self.array[loc[0]][loc[1]]
def __setitem__( self , snake_case__ , snake_case__ ):
'''simple docstring'''
assert self.validate_indicies(snake_case__ )
lowercase__ : List[Any]= value
def __add__( self , snake_case__ ):
'''simple docstring'''
assert isinstance(snake_case__ , snake_case__ )
assert self.row == another.row and self.column == another.column
# Add
lowercase__ : Union[str, Any]= Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
lowercase__ : str= self[r, c] + another[r, c]
return result
def __neg__( self ):
'''simple docstring'''
lowercase__ : int= Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
lowercase__ : Optional[Any]= -self[r, c]
return result
def __sub__( self , snake_case__ ):
'''simple docstring'''
return self + (-another)
def __mul__( self , snake_case__ ):
'''simple docstring'''
if isinstance(snake_case__ , (int, float) ): # Scalar multiplication
lowercase__ : Optional[Any]= Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
lowercase__ : Dict= self[r, c] * another
return result
elif isinstance(snake_case__ , snake_case__ ): # Matrix multiplication
assert self.column == another.row
lowercase__ : int= Matrix(self.row , another.column )
for r in range(self.row ):
for c in range(another.column ):
for i in range(self.column ):
result[r, c] += self[r, i] * another[i, c]
return result
else:
lowercase__ : Union[str, Any]= F'''Unsupported type given for another ({type(snake_case__ )})'''
raise TypeError(snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : int= Matrix(self.column , self.row )
for r in range(self.row ):
for c in range(self.column ):
lowercase__ : List[Any]= self[r, c]
return result
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ ):
'''simple docstring'''
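        # Sherman-Morrison formula: (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u),
        # where this matrix plays the role of A^(-1) and u, v are column vectors.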
assert isinstance(snake_case__ , snake_case__ ) and isinstance(snake_case__ , snake_case__ )
assert self.row == self.column == u.row == v.row # u, v should be column vector
assert u.column == v.column == 1 # u, v should be column vector
# Calculate
lowercase__ : List[Any]= v.transpose()
lowercase__ : List[str]= (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
            return None # It's not invertible
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
def lowercase__() ->None:
"""simple docstring"""
lowercase__ : str= Matrix(3 , 3 , 0 )
for i in range(3 ):
lowercase__ : Optional[int]= 1
print(f'''a^(-1) is {ainv}''' )
# u, v
lowercase__ : Optional[Any]= Matrix(3 , 1 , 0 )
lowercase__, lowercase__, lowercase__ : Union[str, Any]= 1, 2, -3
lowercase__ : Union[str, Any]= Matrix(3 , 1 , 0 )
lowercase__, lowercase__, lowercase__ : int= 4, -2, 5
print(f'''u is {u}''' )
print(f'''v is {v}''' )
print(f'''uv^T is {u * v.transpose()}''' )
# Sherman Morrison
print(f'''(a + uv^T)^(-1) is {ainv.sherman_morrison(A , A )}''' )
def lowercase__() ->None:
"""simple docstring"""
import doctest
doctest.testmod()
testa()
| 85 |
"""simple docstring"""
from __future__ import annotations
def lowercase__(A ) ->list[int]: # This function is recursive
"""simple docstring"""
lowercase__ : int= len(A )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
lowercase__ : str= array[0]
lowercase__ : Optional[Any]= False
lowercase__ : Any= 1
lowercase__ : list[int]= []
while not is_found and i < array_length:
if array[i] < pivot:
lowercase__ : Union[str, Any]= True
lowercase__ : List[str]= [element for element in array[i:] if element >= array[i]]
lowercase__ : Union[str, Any]= longest_subsequence(A )
if len(A ) > len(A ):
lowercase__ : List[str]= temp_array
else:
i += 1
lowercase__ : List[str]= [element for element in array[1:] if element >= pivot]
lowercase__ : List[str]= [pivot, *longest_subsequence(A )]
if len(A ) > len(A ):
return temp_array
else:
return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
| 85 | 1 |
"""simple docstring"""
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
a : int = logging.get_logger(__name__)
@add_end_docstrings(SCREAMING_SNAKE_CASE__ )
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , **snake_case__ ):
'''simple docstring'''
super().__init__(**snake_case__ )
requires_backends(self , "vision" )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self , snake_case__ , **snake_case__ ):
'''simple docstring'''
return super().__call__(snake_case__ , **snake_case__ )
def UpperCAmelCase_ ( self , **snake_case__ ):
'''simple docstring'''
lowercase__ : Optional[int]= {}
if "candidate_labels" in kwargs:
lowercase__ : Optional[Any]= kwargs["candidate_labels"]
if "hypothesis_template" in kwargs:
lowercase__ : Tuple= kwargs["hypothesis_template"]
return preprocess_params, {}, {}
def UpperCAmelCase_ ( self , snake_case__ , snake_case__=None , snake_case__="This is a photo of {}." ):
'''simple docstring'''
lowercase__ : Tuple= load_image(snake_case__ )
lowercase__ : List[str]= self.image_processor(images=[image] , return_tensors=self.framework )
lowercase__ : Any= candidate_labels
lowercase__ : Optional[Any]= [hypothesis_template.format(snake_case__ ) for x in candidate_labels]
lowercase__ : Tuple= self.tokenizer(snake_case__ , return_tensors=self.framework , padding=snake_case__ )
lowercase__ : int= [text_inputs]
return inputs
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
lowercase__ : Dict= model_inputs.pop("candidate_labels" )
lowercase__ : Optional[int]= model_inputs.pop("text_inputs" )
if isinstance(text_inputs[0] , snake_case__ ):
lowercase__ : List[Any]= text_inputs[0]
else:
# Batching case.
lowercase__ : Dict= text_inputs[0][0]
lowercase__ : List[Any]= self.model(**snake_case__ , **snake_case__ )
lowercase__ : List[Any]= {
"candidate_labels": candidate_labels,
"logits": outputs.logits_per_image,
}
return model_outputs
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
lowercase__ : List[Any]= model_outputs.pop("candidate_labels" )
lowercase__ : Optional[int]= model_outputs["logits"][0]
if self.framework == "pt":
lowercase__ : Union[str, Any]= logits.softmax(dim=-1 ).squeeze(-1 )
lowercase__ : List[str]= probs.tolist()
if not isinstance(snake_case__ , snake_case__ ):
lowercase__ : Optional[Any]= [scores]
elif self.framework == "tf":
lowercase__ : Any= stable_softmax(snake_case__ , axis=-1 )
lowercase__ : str= probs.numpy().tolist()
else:
raise ValueError(F'''Unsupported framework: {self.framework}''' )
lowercase__ : List[Any]= [
{"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(snake_case__ , snake_case__ ) , key=lambda snake_case__ : -snake_case__[0] )
]
return result
| 85 |
"""simple docstring"""
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
a : int = argparse.ArgumentParser()
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--txt2img_unclip""",
default="""kakaobrain/karlo-v1-alpha""",
type=str,
required=False,
help="""The pretrained txt2img unclip.""",
)
a : List[str] = parser.parse_args()
    a : List[str] = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
a : Optional[Any] = CLIPImageProcessor()
a : List[str] = CLIPVisionModelWithProjection.from_pretrained("""openai/clip-vit-large-patch14""")
a : Tuple = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
| 85 | 1 |
"""simple docstring"""
def lowercase__(A , A = False ) ->str:
"""simple docstring"""
if not isinstance(A , A ):
lowercase__ : Optional[int]= f'''Expected string as input, found {type(A )}'''
raise ValueError(A )
if not isinstance(A , A ):
lowercase__ : Union[str, Any]= f'''Expected boolean as use_pascal parameter, found {type(A )}'''
raise ValueError(A )
lowercase__ : List[str]= input_str.split("_" )
lowercase__ : Optional[int]= 0 if use_pascal else 1
lowercase__ : List[str]= words[start_index:]
lowercase__ : Any= [word[0].upper() + word[1:] for word in words_to_capitalize]
lowercase__ : str= "" if use_pascal else words[0]
return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 85 |
"""simple docstring"""
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
a : Optional[Any] = {
"""bart""": (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""bert""": (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-base-cased-finetuned-mrpc""": (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""dpr""": (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""gpt2""": (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlnet""": (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlm""": (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlm-roberta""": (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""transfo-xl""": (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""openai-gpt""": (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""roberta""": (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""layoutlm""": (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""roberta-large-mnli""": (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""camembert""": (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""flaubert""": (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""distilbert""": (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""distilbert-base-distilled-squad""": (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""lxmert""": (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""lxmert-visual-feature-encoder""": (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""ctrl""": (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""albert""": (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""t5""": (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""electra""": (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""wav2vec2""": (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def lowercase__(A , A , A , A , A=False , A=True ) ->Union[str, Any]:
"""simple docstring"""
if model_type not in MODEL_CLASSES:
raise ValueError(f'''Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.''' )
lowercase__, lowercase__, lowercase__, lowercase__ : List[Any]= MODEL_CLASSES[model_type]
# Initialise TF model
if config_file in aws_config_map:
lowercase__ : List[str]= cached_file(A , A , force_download=not use_cached_models )
lowercase__ : List[Any]= config_class.from_json_file(A )
lowercase__ : Any= True
lowercase__ : List[str]= True
print(f'''Building TensorFlow model from configuration: {config}''' )
lowercase__ : Optional[int]= model_class(A )
# Load weights from tf checkpoint
if pytorch_checkpoint_path in aws_config_map.keys():
lowercase__ : List[str]= cached_file(
A , A , force_download=not use_cached_models )
# Load PyTorch checkpoint in tf2 model:
lowercase__ : Union[str, Any]= load_pytorch_checkpoint_in_tfa_model(A , A )
if compare_with_pt_model:
lowercase__ : Any= tf_model(tf_model.dummy_inputs , training=A ) # build the network
lowercase__ : Optional[Any]= torch.load(A , map_location="cpu" )
lowercase__ : Union[str, Any]= pt_model_class.from_pretrained(
pretrained_model_name_or_path=A , config=A , state_dict=A )
with torch.no_grad():
lowercase__ : str= pt_model(**pt_model.dummy_inputs )
lowercase__ : Tuple= pto[0].numpy()
lowercase__ : List[Any]= tfo[0].numpy()
lowercase__ : Any= np.amax(np.abs(np_pt - np_tf ) )
print(f'''Max absolute difference between models outputs {diff}''' )
assert diff <= 2e-2, f'''Error, model absolute difference is >2e-2: {diff}'''
# Save pytorch-model
print(f'''Save TensorFlow model to {tf_dump_path}''' )
tf_model.save_weights(A , save_format="h5" )
def lowercase__(A , A , A=None , A=None , A=False , A=False , A=False , A=False , ) ->List[Any]:
"""simple docstring"""
if args_model_type is None:
lowercase__ : Tuple= list(MODEL_CLASSES.keys() )
else:
lowercase__ : Optional[int]= [args_model_type]
for j, model_type in enumerate(A , start=1 ):
print("=" * 100 )
print(f''' Converting model type {j}/{len(A )}: {model_type}''' )
print("=" * 100 )
if model_type not in MODEL_CLASSES:
raise ValueError(f'''Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.''' )
lowercase__, lowercase__, lowercase__, lowercase__, lowercase__ : Optional[int]= MODEL_CLASSES[model_type]
if model_shortcut_names_or_path is None:
lowercase__ : int= list(aws_model_maps.keys() )
if config_shortcut_names_or_path is None:
lowercase__ : Any= model_shortcut_names_or_path
for i, (model_shortcut_name, config_shortcut_name) in enumerate(
zip(A , A ) , start=1 ):
print("-" * 100 )
if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
if not only_convert_finetuned_models:
print(f''' Skipping finetuned checkpoint {model_shortcut_name}''' )
continue
lowercase__ : Any= model_shortcut_name
elif only_convert_finetuned_models:
                print(f''' Skipping non-finetuned checkpoint {model_shortcut_name}''' )
continue
print(
f''' Converting checkpoint {i}/{len(A )}: {model_shortcut_name} - model_type {model_type}''' )
print("-" * 100 )
if config_shortcut_name in aws_config_map:
lowercase__ : List[str]= cached_file(A , A , force_download=not use_cached_models )
else:
lowercase__ : Union[str, Any]= config_shortcut_name
if model_shortcut_name in aws_model_maps:
lowercase__ : str= cached_file(A , A , force_download=not use_cached_models )
else:
lowercase__ : Any= model_shortcut_name
if os.path.isfile(A ):
lowercase__ : Dict= "converted_model"
convert_pt_checkpoint_to_tf(
model_type=A , pytorch_checkpoint_path=A , config_file=A , tf_dump_path=os.path.join(A , model_shortcut_name + "-tf_model.h5" ) , compare_with_pt_model=A , )
if remove_cached_files:
os.remove(A )
os.remove(A )
if __name__ == "__main__":
a : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_dump_path""", default=None, type=str, required=True, help="""Path to the output Tensorflow dump file."""
)
parser.add_argument(
"""--model_type""",
default=None,
type=str,
help=(
F"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """
"""convert all the models from AWS."""
),
)
parser.add_argument(
"""--pytorch_checkpoint_path""",
default=None,
type=str,
help=(
"""Path to the PyTorch checkpoint path or shortcut name to download from AWS. """
"""If not given, will download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
help=(
"""The config json file corresponding to the pre-trained model. \n"""
"""This specifies the model architecture. If not given and """
"""--pytorch_checkpoint_path is not given or is a shortcut name """
"""use the configuration associated to the shortcut name on the AWS"""
),
)
parser.add_argument(
"""--compare_with_pt_model""", action="""store_true""", help="""Compare Tensorflow and PyTorch model predictions."""
)
parser.add_argument(
"""--use_cached_models""",
action="""store_true""",
help="""Use cached models if possible instead of updating to latest checkpoint versions.""",
)
parser.add_argument(
"""--remove_cached_files""",
action="""store_true""",
help="""Remove pytorch models after conversion (save memory when converting in batches).""",
)
parser.add_argument("""--only_convert_finetuned_models""", action="""store_true""", help="""Only convert finetuned models.""")
a : List[str] = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
| 85 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
a : Any = {
"""configuration_squeezebert""": [
"""SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SqueezeBertConfig""",
"""SqueezeBertOnnxConfig""",
],
"""tokenization_squeezebert""": ["""SqueezeBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : int = ["""SqueezeBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Optional[Any] = [
"""SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SqueezeBertForMaskedLM""",
"""SqueezeBertForMultipleChoice""",
"""SqueezeBertForQuestionAnswering""",
"""SqueezeBertForSequenceClassification""",
"""SqueezeBertForTokenClassification""",
"""SqueezeBertModel""",
"""SqueezeBertModule""",
"""SqueezeBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
a : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 85 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
a : List[str] = {"""processing_wav2vec2_with_lm""": ["""Wav2Vec2ProcessorWithLM"""]}
if TYPE_CHECKING:
from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
import sys
a : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 85 | 1 |
"""simple docstring"""
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= tempfile.mkdtemp()
lowercase__ : Optional[Any]= 8
# DPR tok
lowercase__ : Tuple= [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
lowercase__ : Any= os.path.join(self.tmpdirname , "dpr_tokenizer" )
os.makedirs(snake_case__ , exist_ok=snake_case__ )
lowercase__ : Any= os.path.join(snake_case__ , DPR_VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
# BART tok
lowercase__ : List[Any]= [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
lowercase__ : Tuple= dict(zip(snake_case__ , range(len(snake_case__ ) ) ) )
lowercase__ : Any= ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
lowercase__ : Tuple= {"unk_token": "<unk>"}
lowercase__ : int= os.path.join(self.tmpdirname , "bart_tokenizer" )
os.makedirs(snake_case__ , exist_ok=snake_case__ )
lowercase__ : List[str]= os.path.join(snake_case__ , BART_VOCAB_FILES_NAMES["vocab_file"] )
lowercase__ : str= os.path.join(snake_case__ , BART_VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(snake_case__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(snake_case__ ) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , "bart_tokenizer" ) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= Dataset.from_dict(
{
"id": ["0", "1"],
"text": ["foo", "bar"],
"title": ["Foo", "Bar"],
"embeddings": [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Union[str, Any]= self.get_dummy_dataset()
lowercase__ : Optional[Any]= RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset:
lowercase__ : Tuple= dataset
lowercase__ : Optional[int]= RagRetriever(
snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
lowercase__ : Dict= self.get_dummy_dataset()
lowercase__ : Tuple= RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="custom" , )
if from_disk:
lowercase__ : Tuple= os.path.join(self.tmpdirname , "dataset" )
lowercase__ : Optional[Any]= os.path.join(self.tmpdirname , "index.faiss" )
dataset.get_index("embeddings" ).save(os.path.join(self.tmpdirname , "index.faiss" ) )
dataset.drop_index("embeddings" )
dataset.save_to_disk(os.path.join(self.tmpdirname , "dataset" ) )
del dataset
lowercase__ : List[Any]= RagRetriever(
snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
lowercase__ : Optional[int]= RagRetriever(
snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , snake_case__ ) , )
return retriever
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[str]= Dataset.from_dict(
{
"id": ["0", "1"],
"text": ["foo", "bar"],
"title": ["Foo", "Bar"],
"embeddings": [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT )
lowercase__ : Optional[int]= os.path.join(self.tmpdirname , "hf_bert_base.hnswSQ8_correct_phi_128.c_index" )
dataset.save_faiss_index("embeddings" , index_file_name + ".index.dpr" )
pickle.dump(dataset["id"] , open(index_file_name + ".index_meta.dpr" , "wb" ) )
lowercase__ : int= os.path.join(self.tmpdirname , "psgs_w100.tsv.pkl" )
lowercase__ : str= {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
pickle.dump(snake_case__ , open(snake_case__ , "wb" ) )
lowercase__ : List[Any]= RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="legacy" , index_path=self.tmpdirname , )
lowercase__ : Optional[Any]= RagRetriever(
snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= 1
lowercase__ : Optional[Any]= self.get_dummy_canonical_hf_index_retriever()
lowercase__ : Union[str, Any]= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__, lowercase__, lowercase__ : Optional[int]= retriever.retrieve(snake_case__ , n_docs=snake_case__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(snake_case__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , snake_case__ )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset:
lowercase__ : Tuple= self.get_dummy_dataset()
retriever.save_pretrained(snake_case__ )
lowercase__ : int= RagRetriever.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
lowercase__ : Any= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : Tuple= retriever.retrieve(snake_case__ , n_docs=1 )
self.assertTrue(out is not None )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[Any]= 1
lowercase__ : Any= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ )
lowercase__ : Union[str, Any]= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__, lowercase__, lowercase__ : Any= retriever.retrieve(snake_case__ , n_docs=snake_case__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(snake_case__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , snake_case__ )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(snake_case__ )
lowercase__ : int= RagRetriever.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
lowercase__ : Tuple= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : str= retriever.retrieve(snake_case__ , n_docs=1 )
self.assertTrue(out is not None )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Any= 1
lowercase__ : str= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ )
lowercase__ : List[str]= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__, lowercase__, lowercase__ : Optional[int]= retriever.retrieve(snake_case__ , n_docs=snake_case__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(snake_case__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , snake_case__ )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Tuple= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(snake_case__ )
lowercase__ : Optional[Any]= RagRetriever.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
lowercase__ : int= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : Union[str, Any]= retriever.retrieve(snake_case__ , n_docs=1 )
self.assertTrue(out is not None )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= 1
lowercase__ : int= self.get_dummy_legacy_index_retriever()
lowercase__ : Optional[Any]= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__, lowercase__, lowercase__ : Optional[Any]= retriever.retrieve(snake_case__ , n_docs=snake_case__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(snake_case__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["text", "title"] )
self.assertEqual(len(doc_dicts[0]["text"] ) , snake_case__ )
self.assertEqual(doc_dicts[0]["text"][0] , "bar" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["text"][0] , "foo" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[int]= self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(snake_case__ )
lowercase__ : List[Any]= RagRetriever.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
lowercase__ : str= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : Tuple= retriever.retrieve(snake_case__ , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def UpperCAmelCase_ ( self ):
'''simple docstring'''
import torch
lowercase__ : str= 1
lowercase__ : Union[str, Any]= self.get_dummy_canonical_hf_index_retriever()
lowercase__ : str= [[5, 7], [10, 11]]
lowercase__ : List[str]= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : Dict= retriever(snake_case__ , snake_case__ , prefix=retriever.config.generator.prefix , n_docs=snake_case__ )
lowercase__, lowercase__, lowercase__ : Optional[int]= (
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(snake_case__ , snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
self.assertIsInstance(snake_case__ , np.ndarray )
lowercase__ : Any= retriever(
snake_case__ , snake_case__ , prefix=retriever.config.generator.prefix , n_docs=snake_case__ , return_tensors="pt" , )
lowercase__, lowercase__, lowercase__, lowercase__ : Tuple= ( # noqa: F841
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
out["doc_ids"],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(snake_case__ , torch.Tensor )
self.assertIsInstance(snake_case__ , torch.Tensor )
self.assertIsInstance(snake_case__ , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[str]= self.get_dpr_ctx_encoder_tokenizer()
lowercase__ : Dict= 1
lowercase__ : Any= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ )
retriever.set_ctx_encoder_tokenizer(snake_case__ )
lowercase__ : List[str]= [[5, 7], [10, 11]]
lowercase__ : Any= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : List[Any]= retriever(snake_case__ , snake_case__ , prefix=retriever.config.generator.prefix , n_docs=snake_case__ )
self.assertEqual(
            len(snake_case__ ) , 6 ) # check whether the retriever output consists of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask") ) , snake_case__ ) # check for doc token related keys in dictionary.
| 85 |
"""simple docstring"""
def lowercase__(A ) ->list:
"""simple docstring"""
if n_term == "":
return []
lowercase__ : list= []
for temp in range(int(A ) ):
series.append(f'''1/{temp + 1}''' if series else "1" )
return series
if __name__ == "__main__":
a : Dict = input("""Enter the last number (nth term) of the Harmonic Series""")
print("""Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n""")
print(harmonic_series(nth_term))
| 85 | 1 |
"""simple docstring"""
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def lowercase__(A ) ->Optional[Any]:
"""simple docstring"""
if isinstance(A , collections.abc.Iterable ):
return x
return (x, x)
@require_flax
class __UpperCAmelCase:
"""simple docstring"""
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ ):
'''simple docstring'''
pass
def UpperCAmelCase_ ( self ):
'''simple docstring'''
pass
def UpperCAmelCase_ ( self ):
'''simple docstring'''
pass
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
lowercase__ : Optional[int]= np.abs((a - b) ).max()
self.assertLessEqual(snake_case__ , snake_case__ , F'''Difference between torch and flax is {diff} (>= {tol}).''' )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__=None , **snake_case__ ):
'''simple docstring'''
lowercase__ : List[Any]= VisionTextDualEncoderConfig.from_vision_text_configs(snake_case__ , snake_case__ )
lowercase__ : Any= FlaxVisionTextDualEncoderModel(snake_case__ )
lowercase__ : List[str]= model(input_ids=snake_case__ , pixel_values=snake_case__ , attention_mask=snake_case__ )
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], config.projection_dim) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__=None , **snake_case__ ):
'''simple docstring'''
lowercase__, lowercase__ : Any= self.get_vision_text_model(snake_case__ , snake_case__ )
lowercase__ : Any= {"vision_model": vision_model, "text_model": text_model}
lowercase__ : Optional[Any]= FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**snake_case__ )
lowercase__ : int= model(input_ids=snake_case__ , pixel_values=snake_case__ , attention_mask=snake_case__ )
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__=None , **snake_case__ ):
'''simple docstring'''
lowercase__, lowercase__ : Union[str, Any]= self.get_vision_text_model(snake_case__ , snake_case__ )
lowercase__ : int= {"vision_model": vision_model, "text_model": text_model}
lowercase__ : Tuple= FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**snake_case__ )
lowercase__ : Optional[int]= model(input_ids=snake_case__ , pixel_values=snake_case__ , attention_mask=snake_case__ )
lowercase__ : int= output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case__ )
lowercase__ : Any= FlaxVisionTextDualEncoderModel.from_pretrained(snake_case__ )
lowercase__ : Union[str, Any]= model(input_ids=snake_case__ , pixel_values=snake_case__ , attention_mask=snake_case__ )
lowercase__ : Optional[Any]= after_output[0]
lowercase__ : List[str]= np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(snake_case__ , 1e-3 )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__=None , **snake_case__ ):
'''simple docstring'''
lowercase__, lowercase__ : Union[str, Any]= self.get_vision_text_model(snake_case__ , snake_case__ )
lowercase__ : Tuple= {"vision_model": vision_model, "text_model": text_model}
lowercase__ : Any= FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**snake_case__ )
lowercase__ : Union[str, Any]= model(
input_ids=snake_case__ , pixel_values=snake_case__ , attention_mask=snake_case__ , output_attentions=snake_case__ )
lowercase__ : Tuple= output.vision_model_output.attentions
self.assertEqual(len(snake_case__ ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
lowercase__ : List[Any]= to_atuple(vision_model.config.image_size )
lowercase__ : Dict= to_atuple(vision_model.config.patch_size )
lowercase__ : Tuple= (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
lowercase__ : List[str]= num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
lowercase__ : int= output.text_model_output.attentions
self.assertEqual(len(snake_case__ ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
pt_model.to(snake_case__ )
pt_model.eval()
# prepare inputs
lowercase__ : Dict= inputs_dict
lowercase__ : Tuple= {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
lowercase__ : Union[str, Any]= pt_model(**snake_case__ ).to_tuple()
lowercase__ : Optional[int]= fx_model(**snake_case__ ).to_tuple()
self.assertEqual(len(snake_case__ ) , len(snake_case__ ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(snake_case__ , pt_output.numpy() , 4e-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(snake_case__ )
lowercase__ : Union[str, Any]= FlaxVisionTextDualEncoderModel.from_pretrained(snake_case__ , from_pt=snake_case__ )
lowercase__ : Union[str, Any]= fx_model_loaded(**snake_case__ ).to_tuple()
self.assertEqual(len(snake_case__ ) , len(snake_case__ ) , "Output lengths differ between Flax and PyTorch" )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(snake_case__ , pt_output.numpy() , 4e-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(snake_case__ )
lowercase__ : Optional[Any]= VisionTextDualEncoderModel.from_pretrained(snake_case__ , from_flax=snake_case__ )
pt_model_loaded.to(snake_case__ )
pt_model_loaded.eval()
with torch.no_grad():
lowercase__ : List[str]= pt_model_loaded(**snake_case__ ).to_tuple()
self.assertEqual(len(snake_case__ ) , len(snake_case__ ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(snake_case__ , pt_output_loaded.numpy() , 4e-2 )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
lowercase__ : str= VisionTextDualEncoderConfig.from_vision_text_configs(snake_case__ , snake_case__ )
lowercase__ : Tuple= VisionTextDualEncoderModel(snake_case__ )
lowercase__ : Optional[int]= FlaxVisionTextDualEncoderModel(snake_case__ )
lowercase__ : Optional[Any]= convert_pytorch_state_dict_to_flax(pt_model.state_dict() , snake_case__ )
lowercase__ : Optional[int]= fx_state
self.check_pt_flax_equivalence(snake_case__ , snake_case__ , snake_case__ )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
lowercase__ : Union[str, Any]= VisionTextDualEncoderConfig.from_vision_text_configs(snake_case__ , snake_case__ )
lowercase__ : Dict= VisionTextDualEncoderModel(snake_case__ )
lowercase__ : Optional[Any]= FlaxVisionTextDualEncoderModel(snake_case__ )
lowercase__ : Tuple= load_flax_weights_in_pytorch_model(snake_case__ , fx_model.params )
self.check_pt_flax_equivalence(snake_case__ , snake_case__ , snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : int= self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Any= self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : int= self.prepare_config_and_inputs()
self.check_save_load(**snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[int]= self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**snake_case__ )
@is_pt_flax_cross_test
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Tuple= self.prepare_config_and_inputs()
lowercase__ : Dict= config_inputs_dict.pop("vision_config" )
lowercase__ : List[str]= config_inputs_dict.pop("text_config" )
lowercase__ : Optional[int]= config_inputs_dict
self.check_equivalence_pt_to_flax(snake_case__ , snake_case__ , snake_case__ )
self.check_equivalence_flax_to_pt(snake_case__ , snake_case__ , snake_case__ )
@slow
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__, lowercase__ : Optional[int]= self.get_pretrained_model_and_inputs()
lowercase__ : Optional[Any]= model_a(**snake_case__ )
lowercase__ : List[str]= outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(snake_case__ )
lowercase__ : Optional[int]= FlaxVisionTextDualEncoderModel.from_pretrained(snake_case__ )
lowercase__ : Union[str, Any]= model_a(**snake_case__ )
lowercase__ : Optional[Any]= after_outputs[0]
lowercase__ : Optional[Any]= np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(snake_case__ , 1e-5 )
@require_flax
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[str]= FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-vit" , "hf-internal-testing/tiny-bert" , vision_from_pt=snake_case__ , text_from_pt=snake_case__ , )
lowercase__ : Tuple= 13
lowercase__ : List[Any]= floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
lowercase__ : List[Any]= ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
lowercase__ : List[str]= random_attention_mask([batch_size, 4] )
lowercase__ : List[str]= {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ ):
'''simple docstring'''
lowercase__ : Union[str, Any]= FlaxViTModel(snake_case__ )
lowercase__ : Optional[Any]= FlaxBertModel(snake_case__ )
return vision_model, text_model
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Any= FlaxViTModelTester(self )
lowercase__ : List[str]= FlaxBertModelTester(self )
lowercase__ : Tuple= vit_model_tester.prepare_config_and_inputs()
lowercase__ : List[str]= bert_model_tester.prepare_config_and_inputs()
lowercase__, lowercase__ : str= vision_config_and_inputs
lowercase__, lowercase__, lowercase__, lowercase__ : str= text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[str]= FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-clip" , "hf-internal-testing/tiny-bert" , vision_from_pt=snake_case__ , text_from_pt=snake_case__ , )
lowercase__ : Tuple= 13
lowercase__ : Any= floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
lowercase__ : List[Any]= ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
lowercase__ : List[Any]= random_attention_mask([batch_size, 4] )
lowercase__ : Tuple= {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ ):
'''simple docstring'''
lowercase__ : Any= FlaxCLIPVisionModel(snake_case__ )
lowercase__ : Optional[int]= FlaxBertModel(snake_case__ )
return vision_model, text_model
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[Any]= FlaxCLIPVisionModelTester(self )
lowercase__ : Union[str, Any]= FlaxBertModelTester(self )
lowercase__ : Any= clip_model_tester.prepare_config_and_inputs()
lowercase__ : Union[str, Any]= bert_model_tester.prepare_config_and_inputs()
lowercase__, lowercase__ : Optional[int]= vision_config_and_inputs
lowercase__, lowercase__, lowercase__, lowercase__ : Dict= text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class __UpperCAmelCase( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Tuple= FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian" , logit_scale_init_value=1.0 )
lowercase__ : Tuple= VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian" )
lowercase__ : List[str]= Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
lowercase__ : int= processor(
text=["una foto di un gatto", "una foto di un cane"] , images=snake_case__ , padding=snake_case__ , return_tensors="np" )
lowercase__ : Any= model(**snake_case__ )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
lowercase__ : Tuple= np.array([[1.2_28_47_27, 0.3_10_41_22]] )
self.assertTrue(np.allclose(outputs.logits_per_image , snake_case__ , atol=1e-3 ) )
| 85 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a : int = logging.get_logger(__name__)
a : str = {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""",
"""google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""",
"""google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = "big_bird"
def __init__( self , snake_case__=50358 , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__="gelu_new" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=4096 , snake_case__=2 , snake_case__=0.02 , snake_case__=1e-12 , snake_case__=True , snake_case__=0 , snake_case__=1 , snake_case__=2 , snake_case__=66 , snake_case__="block_sparse" , snake_case__=True , snake_case__=False , snake_case__=64 , snake_case__=3 , snake_case__=None , **snake_case__ , ):
'''simple docstring'''
super().__init__(
pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , sep_token_id=snake_case__ , **snake_case__ , )
lowercase__ : Dict= vocab_size
lowercase__ : Optional[int]= max_position_embeddings
lowercase__ : List[Any]= hidden_size
lowercase__ : List[str]= num_hidden_layers
lowercase__ : List[str]= num_attention_heads
lowercase__ : Optional[int]= intermediate_size
lowercase__ : Optional[int]= hidden_act
lowercase__ : Tuple= hidden_dropout_prob
lowercase__ : int= attention_probs_dropout_prob
lowercase__ : int= initializer_range
lowercase__ : List[Any]= type_vocab_size
lowercase__ : Union[str, Any]= layer_norm_eps
lowercase__ : Optional[Any]= use_cache
lowercase__ : Union[str, Any]= rescale_embeddings
lowercase__ : Union[str, Any]= attention_type
lowercase__ : Any= use_bias
lowercase__ : List[Any]= block_size
lowercase__ : Optional[Any]= num_random_blocks
lowercase__ : Optional[int]= classifier_dropout
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
@property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
if self.task == "multiple-choice":
lowercase__ : List[Any]= {0: "batch", 1: "choice", 2: "sequence"}
else:
lowercase__ : Tuple= {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 85 | 1 |
"""simple docstring"""
import math
from collections.abc import Callable
def lowercase__(A , A , A ) ->float:
"""simple docstring"""
lowercase__ : float= xa
lowercase__ : float= xa
while True:
if x_n == x_na or function(A ) == function(A ):
raise ZeroDivisionError("float division by zero, could not find root" )
lowercase__ : float= x_na - (
function(A ) / ((function(A ) - function(A )) / (x_na - x_n))
)
if abs(x_na - x_na ) < 10**-5:
return x_na
lowercase__ : int= x_na
lowercase__ : Union[str, Any]= x_na
def lowercase__(A ) ->float:
"""simple docstring"""
return math.pow(A , 3 ) - (2 * x) - 5
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
| 85 |
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 85 | 1 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__ ):
'''simple docstring'''
lowercase__ : Optional[Any]= params
lowercase__ : Optional[Any]= np.array(snake_case__ )
lowercase__ : Optional[Any]= np.array([len(snake_case__ ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self , snake_case__ ):
'''simple docstring'''
return (self.token_ids[index], self.lengths[index])
def __len__( self ):
'''simple docstring'''
return len(self.lengths )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.params.max_model_input_size
lowercase__ : int= self.lengths > max_len
logger.info(F'''Splitting {sum(snake_case__ )} too long sequences.''' )
def divide_chunks(snake_case__ , snake_case__ ):
return [l[i : i + n] for i in range(0 , len(snake_case__ ) , snake_case__ )]
lowercase__ : Any= []
lowercase__ : Union[str, Any]= []
if self.params.mlm:
lowercase__, lowercase__ : Union[str, Any]= self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
else:
lowercase__, lowercase__ : Union[str, Any]= self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
lowercase__ : str= []
for sub_s in divide_chunks(seq_ , max_len - 2 ):
if sub_s[0] != cls_id:
lowercase__ : Dict= np.insert(snake_case__ , 0 , snake_case__ )
if sub_s[-1] != sep_id:
lowercase__ : List[str]= np.insert(snake_case__ , len(snake_case__ ) , snake_case__ )
assert len(snake_case__ ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(snake_case__ )
new_tok_ids.extend(snake_case__ )
new_lengths.extend([len(snake_case__ ) for l in sub_seqs] )
lowercase__ : Optional[Any]= np.array(snake_case__ )
lowercase__ : Tuple= np.array(snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : str= len(self )
lowercase__ : List[Any]= self.lengths > 11
lowercase__ : List[Any]= self.token_ids[indices]
lowercase__ : Any= self.lengths[indices]
lowercase__ : Dict= len(self )
logger.info(F'''Remove {init_size - new_size} too short (<=11 tokens) sequences.''' )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
if "unk_token" not in self.params.special_tok_ids:
return
else:
lowercase__ : Optional[int]= self.params.special_tok_ids["unk_token"]
lowercase__ : List[str]= len(self )
lowercase__ : Tuple= np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
lowercase__ : Dict= (unk_occs / self.lengths) < 0.5
lowercase__ : int= self.token_ids[indices]
lowercase__ : List[Any]= self.lengths[indices]
lowercase__ : int= len(self )
logger.info(F'''Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).''' )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
if not self.params.is_master:
return
logger.info(F'''{len(self )} sequences''' )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
lowercase__ : str= [t[0] for t in batch]
lowercase__ : Union[str, Any]= [t[1] for t in batch]
assert len(snake_case__ ) == len(snake_case__ )
# Max for paddings
lowercase__ : int= max(snake_case__ )
# Pad token ids
if self.params.mlm:
lowercase__ : Optional[int]= self.params.special_tok_ids["pad_token"]
else:
lowercase__ : Optional[Any]= self.params.special_tok_ids["unk_token"]
lowercase__ : Union[str, Any]= [list(t.astype(snake_case__ ) ) + [pad_idx] * (max_seq_len_ - len(snake_case__ )) for t in token_ids]
assert len(tk_ ) == len(snake_case__ )
assert all(len(snake_case__ ) == max_seq_len_ for t in tk_ )
lowercase__ : Optional[Any]= torch.tensor(tk_ ) # (bs, max_seq_len_)
lowercase__ : Optional[int]= torch.tensor(snake_case__ ) # (bs)
return tk_t, lg_t
| 85 |
"""simple docstring"""
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def lowercase__(A , A ) ->List[Any]:
"""simple docstring"""
lowercase__ : str= []
for part_id in partition_order:
lowercase__ : int= df.where(f'''SPARK_PARTITION_ID() = {part_id}''' ).collect()
for row_idx, row in enumerate(A ):
expected_row_ids_and_row_dicts.append((f'''{part_id}_{row_idx}''', row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def lowercase__() ->str:
"""simple docstring"""
lowercase__ : Optional[Any]= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : Tuple= spark.range(100 ).repartition(1 )
lowercase__ : Dict= Spark(A )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def lowercase__() ->Tuple:
"""simple docstring"""
lowercase__ : Union[str, Any]= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : Dict= spark.range(10 ).repartition(2 )
lowercase__ : Optional[Any]= [1, 0]
lowercase__ : List[str]= _generate_iterable_examples(A , A ) # Reverse the partitions.
lowercase__ : int= _get_expected_row_ids_and_row_dicts_for_partition_order(A , A )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
lowercase__, lowercase__ : Any= expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowercase__() ->int:
"""simple docstring"""
lowercase__ : int= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : Dict= spark.range(10 ).repartition(1 )
lowercase__ : str= SparkExamplesIterable(A )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(A ):
assert row_id == f'''0_{i}'''
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def lowercase__() ->str:
"""simple docstring"""
lowercase__ : List[str]= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : int= spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch("numpy.random.Generator" ) as generator_mock:
lowercase__ : Optional[Any]= lambda A : x.reverse()
lowercase__ : Tuple= _get_expected_row_ids_and_row_dicts_for_partition_order(A , [2, 1, 0] )
lowercase__ : List[str]= SparkExamplesIterable(A ).shuffle_data_sources(A )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(A ):
lowercase__, lowercase__ : str= expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowercase__() ->Any:
"""simple docstring"""
lowercase__ : Dict= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : Union[str, Any]= spark.range(20 ).repartition(4 )
# Partitions 0 and 2
lowercase__ : Optional[int]= SparkExamplesIterable(A ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
lowercase__ : Union[str, Any]= _get_expected_row_ids_and_row_dicts_for_partition_order(A , [0, 2] )
for i, (row_id, row_dict) in enumerate(A ):
lowercase__, lowercase__ : Tuple= expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
lowercase__ : Tuple= SparkExamplesIterable(A ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
lowercase__ : List[Any]= _get_expected_row_ids_and_row_dicts_for_partition_order(A , [1, 3] )
for i, (row_id, row_dict) in enumerate(A ):
lowercase__, lowercase__ : Dict= expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowercase__() ->Tuple:
"""simple docstring"""
lowercase__ : Any= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : Tuple= spark.range(100 ).repartition(1 )
lowercase__ : Optional[int]= Spark(A )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
| 85 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
a : int = {
"""configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
"""processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : int = [
"""TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrOCRForCausalLM""",
"""TrOCRPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
a : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 85 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__=13 , snake_case__=7 , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=False , snake_case__=False , snake_case__=False , snake_case__=2 , snake_case__=99 , snake_case__=0 , snake_case__=32 , snake_case__=5 , snake_case__=4 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=12 , snake_case__=2 , snake_case__=0.02 , snake_case__=3 , snake_case__=4 , snake_case__="last" , snake_case__=None , snake_case__=None , ):
'''simple docstring'''
lowercase__ : Optional[int]= parent
lowercase__ : Tuple= batch_size
lowercase__ : Tuple= seq_length
lowercase__ : str= is_training
lowercase__ : str= use_input_lengths
lowercase__ : Any= use_token_type_ids
lowercase__ : List[Any]= use_labels
lowercase__ : Optional[int]= gelu_activation
lowercase__ : str= sinusoidal_embeddings
lowercase__ : List[str]= causal
lowercase__ : Any= asm
lowercase__ : Optional[int]= n_langs
lowercase__ : Union[str, Any]= vocab_size
lowercase__ : int= n_special
lowercase__ : Any= hidden_size
lowercase__ : int= num_hidden_layers
lowercase__ : List[str]= num_attention_heads
lowercase__ : List[str]= hidden_dropout_prob
lowercase__ : str= attention_probs_dropout_prob
lowercase__ : Any= max_position_embeddings
lowercase__ : List[Any]= type_vocab_size
lowercase__ : int= type_sequence_label_size
lowercase__ : Any= initializer_range
lowercase__ : Optional[int]= num_labels
lowercase__ : Union[str, Any]= num_choices
lowercase__ : List[Any]= summary_type
lowercase__ : Optional[int]= use_proj
lowercase__ : int= scope
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : Dict= random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : Tuple= None
if self.use_input_lengths:
lowercase__ : List[Any]= (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowercase__ : Tuple= None
if self.use_token_type_ids:
lowercase__ : Any= ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
lowercase__ : str= None
lowercase__ : Tuple= None
lowercase__ : Dict= None
if self.use_labels:
lowercase__ : Optional[Any]= ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : Optional[Any]= ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__ : Tuple= ids_tensor([self.batch_size] , 2 ).float()
lowercase__ : Tuple= ids_tensor([self.batch_size] , self.num_choices )
lowercase__ : List[Any]= self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : Any= FlaubertModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : str= model(snake_case__ , lengths=snake_case__ , langs=snake_case__ )
lowercase__ : str= model(snake_case__ , langs=snake_case__ )
lowercase__ : Any= model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : str= FlaubertWithLMHeadModel(snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : Optional[Any]= model(snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : str= FlaubertForQuestionAnsweringSimple(snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : List[str]= model(snake_case__ )
lowercase__ : Dict= model(snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : List[Any]= FlaubertForQuestionAnswering(snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : Dict= model(snake_case__ )
lowercase__ : Any= model(
snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , cls_index=snake_case__ , is_impossible=snake_case__ , p_mask=snake_case__ , )
lowercase__ : List[str]= model(
snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , cls_index=snake_case__ , is_impossible=snake_case__ , )
((lowercase__), ) : Optional[Any]= result_with_labels.to_tuple()
lowercase__ : Union[str, Any]= model(snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ )
((lowercase__), ) : List[Any]= result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : List[str]= FlaubertForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : Optional[Any]= model(snake_case__ )
lowercase__ : Optional[Any]= model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : List[Any]= self.num_labels
lowercase__ : Union[str, Any]= FlaubertForTokenClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : int= model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : int= self.num_choices
lowercase__ : str= FlaubertForMultipleChoice(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : Dict= input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : int= token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : str= input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : Any= model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.prepare_config_and_inputs()
(
(
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
),
) : Any= config_and_inputs
lowercase__ : Tuple= {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"lengths": input_lengths,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
__lowerCamelCase = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
__lowerCamelCase = (
{
"feature-extraction": FlaubertModel,
"fill-mask": FlaubertWithLMHeadModel,
"question-answering": FlaubertForQuestionAnsweringSimple,
"text-classification": FlaubertForSequenceClassification,
"token-classification": FlaubertForTokenClassification,
"zero-shot": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast" )
):
            # `QAPipelineTests` fails for a few models when the slower tokenizers are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__=False ):
'''simple docstring'''
lowercase__ : Tuple= super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
lowercase__ : List[Any]= torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case__ )
lowercase__ : List[str]= torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case__ )
return inputs_dict
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= FlaubertModelTester(self )
lowercase__ : List[str]= ConfigTester(self , config_class=snake_case__ , emb_dim=37 )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Any= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[int]= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Tuple= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[str]= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Union[str, Any]= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Tuple= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*snake_case__ )
@slow
def UpperCAmelCase_ ( self ):
'''simple docstring'''
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : List[str]= FlaubertModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
@slow
@require_torch_gpu
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__, lowercase__ : Optional[Any]= self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
lowercase__ : int= True
lowercase__ : List[Any]= model_class(config=snake_case__ )
lowercase__ : str= self._prepare_for_class(snake_case__ , snake_case__ )
lowercase__ : Dict= torch.jit.trace(
snake_case__ , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(snake_case__ , os.path.join(snake_case__ , "traced_model.pt" ) )
lowercase__ : str= torch.jit.load(os.path.join(snake_case__ , "traced_model.pt" ) , map_location=snake_case__ )
loaded(inputs_dict["input_ids"].to(snake_case__ ) , inputs_dict["attention_mask"].to(snake_case__ ) )
@require_torch
class __UpperCAmelCase( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= FlaubertModel.from_pretrained("flaubert/flaubert_base_cased" )
lowercase__ : Tuple= torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
with torch.no_grad():
lowercase__ : Optional[int]= model(snake_case__ )[0]
lowercase__ : Optional[int]= torch.Size((1, 11, 768) )
self.assertEqual(output.shape , snake_case__ )
lowercase__ : Dict= torch.tensor(
[[[-2.62_51, -1.42_98, -0.02_27], [-2.85_10, -1.63_87, 0.22_58], [-2.81_14, -1.18_32, -0.30_66]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case__ , atol=1e-4 ) )
| 85 | 1 |
"""simple docstring"""
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
__lowerCamelCase = FunnelTokenizer
__lowerCamelCase = FunnelTokenizerFast
__lowerCamelCase = True
__lowerCamelCase = True
def UpperCAmelCase_ ( self ):
'''simple docstring'''
super().setUp()
lowercase__ : Any= [
"<unk>",
"<cls>",
"<sep>",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
lowercase__ : List[Any]= os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def UpperCAmelCase_ ( self , **snake_case__ ):
'''simple docstring'''
return FunnelTokenizer.from_pretrained(self.tmpdirname , **snake_case__ )
def UpperCAmelCase_ ( self , **snake_case__ ):
'''simple docstring'''
return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **snake_case__ )
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
lowercase__ : Dict= "UNwant\u00E9d,running"
lowercase__ : Dict= "unwanted, running"
return input_text, output_text
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= self.tokenizer_class(self.vocab_file )
lowercase__ : List[str]= tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(snake_case__ , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , [7, 4, 5, 10, 8, 9] )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= self.get_tokenizers(do_lower_case=snake_case__ )
for tokenizer in tokenizers:
lowercase__ : Optional[Any]= tokenizer("UNwant\u00E9d,running" )
lowercase__ : Dict= len(inputs["input_ids"] ) - 1
self.assertListEqual(inputs["token_type_ids"] , [2] + [0] * sentence_len )
lowercase__ : Optional[int]= tokenizer("UNwant\u00E9d,running" , "UNwant\u00E9d,running" )
self.assertListEqual(inputs["token_type_ids"] , [2] + [0] * sentence_len + [1] * sentence_len )
| 85 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = 42
__lowerCamelCase = 42
__lowerCamelCase = None
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = 2
@register_to_config
def __init__( self , snake_case__ = 0.02 , snake_case__ = 100 , snake_case__ = 1.0_07 , snake_case__ = 80 , snake_case__ = 0.05 , snake_case__ = 50 , ):
'''simple docstring'''
# standard deviation of the initial noise distribution
lowercase__ : int= sigma_max
# setable values
lowercase__ : int= None
lowercase__ : np.IntTensor= None
lowercase__ : torch.FloatTensor= None # sigma(t_i)
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
return sample
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
lowercase__ : List[Any]= num_inference_steps
lowercase__ : Any= np.arange(0 , self.num_inference_steps )[::-1].copy()
lowercase__ : Tuple= torch.from_numpy(snake_case__ ).to(snake_case__ )
lowercase__ : Union[str, Any]= [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in self.timesteps
]
lowercase__ : int= torch.tensor(snake_case__ , dtype=torch.floataa , device=snake_case__ )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ = None ):
'''simple docstring'''
if self.config.s_min <= sigma <= self.config.s_max:
lowercase__ : Optional[Any]= min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
else:
lowercase__ : str= 0
# sample eps ~ N(0, S_noise^2 * I)
lowercase__ : List[Any]= self.config.s_noise * randn_tensor(sample.shape , generator=snake_case__ ).to(sample.device )
lowercase__ : str= sigma + gamma * sigma
lowercase__ : Any= sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = True , ):
'''simple docstring'''
lowercase__ : Union[str, Any]= sample_hat + sigma_hat * model_output
lowercase__ : Optional[int]= (sample_hat - pred_original_sample) / sigma_hat
lowercase__ : Optional[Any]= sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=snake_case__ , derivative=snake_case__ , pred_original_sample=snake_case__ )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = True , ):
'''simple docstring'''
lowercase__ : int= sample_prev + sigma_prev * model_output
lowercase__ : Optional[int]= (sample_prev - pred_original_sample) / sigma_prev
lowercase__ : Optional[Any]= sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=snake_case__ , derivative=snake_case__ , pred_original_sample=snake_case__ )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
raise NotImplementedError()
| 85 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
a : List[str] = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : str = ["""DPTFeatureExtractor"""]
a : List[Any] = ["""DPTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Dict = [
"""DPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DPTForDepthEstimation""",
"""DPTForSemanticSegmentation""",
"""DPTModel""",
"""DPTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
a : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 85 |
"""simple docstring"""
from ....utils import logging
a : List[str] = logging.get_logger(__name__)
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__=None , snake_case__=2048 ):
'''simple docstring'''
lowercase__ : Dict= config.__dict__
lowercase__ : str= modal_hidden_size
if num_labels:
lowercase__ : List[str]= num_labels
| 85 | 1 |
"""simple docstring"""
import requests
a : Union[str, Any] = """YOUR API KEY"""
def lowercase__(A , A = giphy_api_key ) ->list:
"""simple docstring"""
lowercase__ : str= "+".join(query.split() )
lowercase__ : Optional[int]= f'''https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}'''
lowercase__ : Dict= requests.get(A ).json()["data"]
return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print("""\n""".join(get_gifs("""space ship""")))
| 85 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def lowercase__(A ) ->int:
"""simple docstring"""
lowercase__ : Optional[int]= []
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''',
f'''stage{idx}.patch_embed.proj.weight''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''',
f'''stage{idx}.patch_embed.proj.bias''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''',
f'''stage{idx}.patch_embed.norm.weight''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''',
f'''stage{idx}.patch_embed.norm.bias''',
) )
return embed
def lowercase__(A , A ) ->Any:
"""simple docstring"""
lowercase__ : Any= []
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj.bias''',
) )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', f'''stage{idx}.blocks.{cnt}.norm1.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', f'''stage{idx}.blocks.{cnt}.norm1.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', f'''stage{idx}.blocks.{cnt}.norm2.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', f'''stage{idx}.blocks.{cnt}.norm2.bias''') )
return attention_weights
def lowercase__(A ) ->List[Any]:
"""simple docstring"""
lowercase__ : Dict= []
token.append((f'''cvt.encoder.stages.{idx}.cls_token''', "stage2.cls_token") )
return token
def lowercase__() ->Union[str, Any]:
"""simple docstring"""
lowercase__ : Dict= []
head.append(("layernorm.weight", "norm.weight") )
head.append(("layernorm.bias", "norm.bias") )
head.append(("classifier.weight", "head.weight") )
head.append(("classifier.bias", "head.bias") )
return head
def lowercase__(A , A , A , A ) ->Optional[int]:
"""simple docstring"""
lowercase__ : List[str]= "imagenet-1k-id2label.json"
lowercase__ : List[str]= 1_000
lowercase__ : Tuple= "huggingface/label-files"
lowercase__ : int= num_labels
lowercase__ : int= json.load(open(cached_download(hf_hub_url(A , A , repo_type="dataset" ) ) , "r" ) )
lowercase__ : str= {int(A ): v for k, v in idalabel.items()}
lowercase__ : Optional[int]= idalabel
lowercase__ : Union[str, Any]= {v: k for k, v in idalabel.items()}
lowercase__ : Tuple= CvtConfig(num_labels=A , idalabel=A , labelaid=A )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit("/" , 1 )[-1][4:6] == "13":
lowercase__ : int= [1, 2, 10]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit("/" , 1 )[-1][4:6] == "21":
lowercase__ : Union[str, Any]= [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
else:
lowercase__ : Optional[Any]= [2, 2, 20]
lowercase__ : Optional[Any]= [3, 12, 16]
lowercase__ : List[str]= [192, 768, 1_024]
lowercase__ : List[str]= CvtForImageClassification(A )
lowercase__ : Any= AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" )
lowercase__ : Dict= image_size
lowercase__ : int= torch.load(A , map_location=torch.device("cpu" ) )
lowercase__ : Optional[Any]= OrderedDict()
lowercase__ : Tuple= []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
lowercase__ : Optional[int]= list_of_state_dict + cls_token(A )
lowercase__ : List[str]= list_of_state_dict + embeddings(A )
for cnt in range(config.depth[idx] ):
lowercase__ : Dict= list_of_state_dict + attention(A , A )
lowercase__ : Optional[Any]= list_of_state_dict + final()
for gg in list_of_state_dict:
print(A )
for i in range(len(A ) ):
lowercase__ : str= original_weights[list_of_state_dict[i][1]]
model.load_state_dict(A )
model.save_pretrained(A )
image_processor.save_pretrained(A )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
a : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"""--cvt_model""",
default="""cvt-w24""",
type=str,
help="""Name of the cvt model you'd like to convert.""",
)
parser.add_argument(
"""--image_size""",
default=384,
type=int,
help="""Input Image Size""",
)
parser.add_argument(
"""--cvt_file_name""",
default=r"""cvtmodels\CvT-w24-384x384-IN-22k.pth""",
type=str,
help="""Input Image Size""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
a : Optional[int] = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 85 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
__lowerCamelCase = KandinskyVaaImgaImgPipeline
__lowerCamelCase = ["image_embeds", "negative_image_embeds", "image"]
__lowerCamelCase = [
"image_embeds",
"negative_image_embeds",
"image",
]
__lowerCamelCase = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
__lowerCamelCase = False
@property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return 32
@property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return 32
@property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return self.time_input_dim
@property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return 100
@property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase__ : List[Any]= {
"in_channels": 4,
            # Out channels is double the in channels because the model predicts both mean and variance
"out_channels": 8,
"addition_embed_type": "image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
lowercase__ : Optional[int]= UNetaDConditionModel(**snake_case__ )
return model
@property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase__ : Optional[Any]= VQModel(**self.dummy_movq_kwargs )
return model
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[str]= self.dummy_unet
lowercase__ : int= self.dummy_movq
lowercase__ : Any= {
"num_train_timesteps": 1000,
"beta_schedule": "linear",
"beta_start": 0.0_00_85,
"beta_end": 0.0_12,
"clip_sample": False,
"set_alpha_to_one": False,
"steps_offset": 0,
"prediction_type": "epsilon",
"thresholding": False,
}
lowercase__ : List[Any]= DDIMScheduler(**snake_case__ )
lowercase__ : List[str]= {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def UpperCAmelCase_ ( self , snake_case__ , snake_case__=0 ):
'''simple docstring'''
lowercase__ : str= floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(snake_case__ ) ).to(snake_case__ )
lowercase__ : List[Any]= floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
snake_case__ )
# create init_image
lowercase__ : List[str]= floats_tensor((1, 3, 64, 64) , rng=random.Random(snake_case__ ) ).to(snake_case__ )
lowercase__ : List[Any]= image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase__ : Tuple= Image.fromarray(np.uinta(snake_case__ ) ).convert("RGB" ).resize((256, 256) )
if str(snake_case__ ).startswith("mps" ):
lowercase__ : List[Any]= torch.manual_seed(snake_case__ )
else:
lowercase__ : int= torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
lowercase__ : Optional[Any]= {
"image": init_image,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 10,
"guidance_scale": 7.0,
"strength": 0.2,
"output_type": "np",
}
return inputs
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= "cpu"
lowercase__ : Dict= self.get_dummy_components()
lowercase__ : Optional[int]= self.pipeline_class(**snake_case__ )
lowercase__ : List[str]= pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
lowercase__ : Union[str, Any]= pipe(**self.get_dummy_inputs(snake_case__ ) )
lowercase__ : Optional[int]= output.images
lowercase__ : Union[str, Any]= pipe(
**self.get_dummy_inputs(snake_case__ ) , return_dict=snake_case__ , )[0]
lowercase__ : Optional[Any]= image[0, -3:, -3:, -1]
lowercase__ : Tuple= image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__ : List[Any]= np.array(
[0.6_19_97_78, 0.63_98_44_06, 0.46_14_57_85, 0.62_94_49_84, 0.5_62_22_15, 0.47_30_61_32, 0.47_44_14_56, 0.4_60_76_06, 0.48_71_92_63] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class __UpperCAmelCase( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Tuple= load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_img2img_frog.npy" )
lowercase__ : Optional[Any]= load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
lowercase__ : Dict= "A red cartoon frog, 4k"
lowercase__ : Tuple= KandinskyVaaPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa )
pipe_prior.to(snake_case__ )
lowercase__ : Any= KandinskyVaaImgaImgPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-decoder" , torch_dtype=torch.floataa )
lowercase__ : str= pipeline.to(snake_case__ )
pipeline.set_progress_bar_config(disable=snake_case__ )
lowercase__ : Optional[int]= torch.Generator(device="cpu" ).manual_seed(0 )
lowercase__, lowercase__ : int= pipe_prior(
snake_case__ , generator=snake_case__ , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
lowercase__ : Any= pipeline(
image=snake_case__ , image_embeds=snake_case__ , negative_image_embeds=snake_case__ , generator=snake_case__ , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="np" , )
lowercase__ : Any= output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(snake_case__ , snake_case__ )
| 85 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = 42
__lowerCamelCase = 42
def __init__( self , snake_case__ , snake_case__ ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=snake_case__ , scheduler=snake_case__ )
@torch.no_grad()
def __call__( self , snake_case__ = 1 , snake_case__ = 2000 , snake_case__ = None , snake_case__ = "pil" , snake_case__ = True , **snake_case__ , ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.unet.config.sample_size
lowercase__ : Dict= (batch_size, 3, img_size, img_size)
lowercase__ : List[Any]= self.unet
lowercase__ : Tuple= randn_tensor(snake_case__ , generator=snake_case__ ) * self.scheduler.init_noise_sigma
lowercase__ : Tuple= sample.to(self.device )
self.scheduler.set_timesteps(snake_case__ )
self.scheduler.set_sigmas(snake_case__ )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
lowercase__ : Optional[Any]= self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
lowercase__ : List[Any]= self.unet(snake_case__ , snake_case__ ).sample
lowercase__ : List[Any]= self.scheduler.step_correct(snake_case__ , snake_case__ , generator=snake_case__ ).prev_sample
# prediction step
lowercase__ : List[str]= model(snake_case__ , snake_case__ ).sample
lowercase__ : Tuple= self.scheduler.step_pred(snake_case__ , snake_case__ , snake_case__ , generator=snake_case__ )
lowercase__, lowercase__ : Tuple= output.prev_sample, output.prev_sample_mean
lowercase__ : List[str]= sample_mean.clamp(0 , 1 )
lowercase__ : Union[str, Any]= sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowercase__ : str= self.numpy_to_pil(snake_case__ )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=snake_case__ )
| 85 | 1 |
"""simple docstring"""
from scipy.stats import spearmanr
import datasets
a : Optional[Any] = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""
a : Any = """
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: only returned when `return_pvalue=True` is passed.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{'spearmanr': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results['spearmanr'])
-0.7
>>> print(round(results['spearmanr_pvalue'], 2))
0.19
"""
a : int = r"""\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCAmelCase( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
} ) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"] , )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__=False ):
'''simple docstring'''
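# scipy.stats.spearmanr returns a (correlation, p-value) pair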
lowercase__ : List[str]= spearmanr(snake_case__ , snake_case__ )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 85 |
"""simple docstring"""
def lowercase__(A ) ->list[int]:
"""simple docstring"""
lowercase__ : List[str]= len(A )
for i in range(A ):
for j in range(i + 1 , A ):
if numbers[j] < numbers[i]:
lowercase__, lowercase__ : List[str]= numbers[j], numbers[i]
return numbers
if __name__ == "__main__":
a : Dict = input("""Enter numbers separated by a comma:\n""").strip()
a : List[str] = [int(item) for item in user_input.split(""",""")]
print(exchange_sort(unsorted))
| 85 | 1 |
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
a : int = random.Random()
def lowercase__(A , A=1.0 , A=None , A=None ) ->List[Any]:
"""simple docstring"""
if rng is None:
lowercase__ : int= global_rng
lowercase__ : List[str]= []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class __UpperCAmelCase( unittest.TestCase ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__=7 , snake_case__=400 , snake_case__=2000 , snake_case__=2048 , snake_case__=128 , snake_case__=1 , snake_case__=512 , snake_case__=30 , snake_case__=44100 , ):
'''simple docstring'''
lowercase__ : Dict= parent
lowercase__ : Tuple= batch_size
lowercase__ : int= min_seq_length
lowercase__ : Union[str, Any]= max_seq_length
lowercase__ : str= (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
lowercase__ : str= spectrogram_length
lowercase__ : Optional[Any]= feature_size
lowercase__ : Any= num_audio_channels
lowercase__ : Tuple= hop_length
lowercase__ : Optional[Any]= chunk_length
lowercase__ : Union[str, Any]= sampling_rate
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def UpperCAmelCase_ ( self , snake_case__=False , snake_case__=False ):
'''simple docstring'''
def _flatten(snake_case__ ):
return list(itertools.chain(*snake_case__ ) )
if equal_length:
lowercase__ : Tuple= [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
lowercase__ : str= [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowercase__ : List[str]= [np.asarray(snake_case__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
__lowerCamelCase = TvltFeatureExtractor
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[Any]= TvltFeatureExtractionTester(self )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : int= self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(snake_case__ , "spectrogram_length" ) )
self.assertTrue(hasattr(snake_case__ , "feature_size" ) )
self.assertTrue(hasattr(snake_case__ , "num_audio_channels" ) )
self.assertTrue(hasattr(snake_case__ , "hop_length" ) )
self.assertTrue(hasattr(snake_case__ , "chunk_length" ) )
self.assertTrue(hasattr(snake_case__ , "sampling_rate" ) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Tuple= self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__ : Tuple= feat_extract_first.save_pretrained(snake_case__ )[0]
check_json_file_has_correct_format(snake_case__ )
lowercase__ : List[Any]= self.feature_extraction_class.from_pretrained(snake_case__ )
lowercase__ : int= feat_extract_first.to_dict()
lowercase__ : Optional[Any]= feat_extract_second.to_dict()
lowercase__ : Optional[int]= dict_first.pop("mel_filters" )
lowercase__ : List[str]= dict_second.pop("mel_filters" )
self.assertTrue(np.allclose(snake_case__ , snake_case__ ) )
self.assertEqual(snake_case__ , snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : str= self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__ : int= os.path.join(snake_case__ , "feat_extract.json" )
feat_extract_first.to_json_file(snake_case__ )
lowercase__ : List[Any]= self.feature_extraction_class.from_json_file(snake_case__ )
lowercase__ : int= feat_extract_first.to_dict()
lowercase__ : List[Any]= feat_extract_second.to_dict()
lowercase__ : Optional[Any]= dict_first.pop("mel_filters" )
lowercase__ : str= dict_second.pop("mel_filters" )
self.assertTrue(np.allclose(snake_case__ , snake_case__ ) )
self.assertEqual(snake_case__ , snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
# Initialize feature_extractor
lowercase__ : Tuple= self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
lowercase__ : List[Any]= [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowercase__ : List[Any]= [np.asarray(snake_case__ ) for speech_input in speech_inputs]
# Test not batched input
lowercase__ : Optional[Any]= feature_extractor(np_speech_inputs[0] , return_tensors="np" , sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
lowercase__ : List[Any]= feature_extractor(snake_case__ , return_tensors="np" , sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
lowercase__ : Optional[int]= feature_extractor(
snake_case__ , return_tensors="np" , sampling_rate=44100 , mask_audio=snake_case__ ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
lowercase__ : Union[str, Any]= [floats_list((1, x) )[0] for x in (800, 800, 800)]
lowercase__ : Optional[int]= np.asarray(snake_case__ )
lowercase__ : Any= feature_extractor(snake_case__ , return_tensors="np" , sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
lowercase__ : Union[str, Any]= load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
lowercase__ : str= ds.sort("id" ).select(range(snake_case__ ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Any= self._load_datasamples(1 )
lowercase__ : List[str]= TvltFeatureExtractor()
lowercase__ : Optional[int]= feature_extractor(snake_case__ , return_tensors="pt" ).audio_values
self.assertEqual(audio_values.shape , (1, 1, 192, 128) )
lowercase__ : str= torch.tensor([[-0.30_32, -0.27_08], [-0.44_34, -0.40_07]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , snake_case__ , atol=1e-4 ) )
| 85 |
"""simple docstring"""
import math
from collections.abc import Iterator
from itertools import takewhile
def lowercase__(A ) ->bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All prime numbers greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(A ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def lowercase__() ->Iterator[int]:
"""simple docstring"""
lowercase__ : Union[str, Any]= 2
while True:
if is_prime(A ):
yield num
num += 1
def lowercase__(A = 2_000_000 ) ->int:
"""simple docstring"""
return sum(takewhile(lambda x : x < A , prime_generator() ) )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 85 | 1 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = "Speech2TextFeatureExtractor"
__lowerCamelCase = "Speech2TextTokenizer"
def __init__( self , snake_case__ , snake_case__ ):
'''simple docstring'''
super().__init__(snake_case__ , snake_case__ )
lowercase__ : Any= self.feature_extractor
lowercase__ : List[Any]= False
def __call__( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*snake_case__ , **snake_case__ )
if "raw_speech" in kwargs:
warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." )
lowercase__ : Optional[int]= kwargs.pop("raw_speech" )
else:
lowercase__ : List[str]= kwargs.pop("audio" , snake_case__ )
lowercase__ : Tuple= kwargs.pop("sampling_rate" , snake_case__ )
lowercase__ : List[Any]= kwargs.pop("text" , snake_case__ )
if len(snake_case__ ) > 0:
lowercase__ : Optional[Any]= args[0]
lowercase__ : Tuple= args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process." )
if audio is not None:
lowercase__ : int= self.feature_extractor(snake_case__ , *snake_case__ , sampling_rate=snake_case__ , **snake_case__ )
if text is not None:
lowercase__ : str= self.tokenizer(snake_case__ , **snake_case__ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
lowercase__ : List[Any]= encodings["input_ids"]
return inputs
def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ )
def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
return self.tokenizer.decode(*snake_case__ , **snake_case__ )
@contextmanager
def UpperCAmelCase_ ( self ):
'''simple docstring'''
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your audio inputs, or in a separate call." )
lowercase__ : Optional[int]= True
lowercase__ : str= self.tokenizer
yield
lowercase__ : Optional[Any]= self.feature_extractor
lowercase__ : Any= False
| 85 |
"""simple docstring"""
def lowercase__(A ) ->bool:
"""simple docstring"""
lowercase__ : Tuple= (1 + 24 * n) ** 0.5
return ((1 + root) / 6) % 1 == 0
def lowercase__(A = 5_000 ) ->int:
"""simple docstring"""
lowercase__ : str= [(i * (3 * i - 1)) // 2 for i in range(1 , A )]
for i, pentagonal_i in enumerate(A ):
for j in range(A , len(A ) ):
lowercase__ : List[Any]= pentagonal_nums[j]
lowercase__ : int= pentagonal_i + pentagonal_j
lowercase__ : Optional[int]= pentagonal_j - pentagonal_i
if is_pentagonal(A ) and is_pentagonal(A ):
return b
return -1
if __name__ == "__main__":
print(F"""{solution() = }""")
| 85 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = ["image_processor", "tokenizer"]
__lowerCamelCase = "BridgeTowerImageProcessor"
__lowerCamelCase = ("RobertaTokenizer", "RobertaTokenizerFast")
def __init__( self , snake_case__ , snake_case__ ):
'''simple docstring'''
super().__init__(snake_case__ , snake_case__ )
def __call__( self , snake_case__ , snake_case__ = None , snake_case__ = True , snake_case__ = False , snake_case__ = None , snake_case__ = None , snake_case__ = 0 , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = False , snake_case__ = False , snake_case__ = False , snake_case__ = False , snake_case__ = True , snake_case__ = None , **snake_case__ , ):
'''simple docstring'''
lowercase__ : Optional[int]= self.tokenizer(
text=snake_case__ , add_special_tokens=snake_case__ , padding=snake_case__ , truncation=snake_case__ , max_length=snake_case__ , stride=snake_case__ , pad_to_multiple_of=snake_case__ , return_token_type_ids=snake_case__ , return_attention_mask=snake_case__ , return_overflowing_tokens=snake_case__ , return_special_tokens_mask=snake_case__ , return_offsets_mapping=snake_case__ , return_length=snake_case__ , verbose=snake_case__ , return_tensors=snake_case__ , **snake_case__ , )
# add pixel_values + pixel_mask
lowercase__ : Optional[int]= self.image_processor(
snake_case__ , return_tensors=snake_case__ , do_normalize=snake_case__ , do_center_crop=snake_case__ , **snake_case__ )
encoding.update(snake_case__ )
return encoding
def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ )
def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
return self.tokenizer.decode(*snake_case__ , **snake_case__ )
@property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.tokenizer.model_input_names
lowercase__ : List[Any]= self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 85 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : List[str] = logging.get_logger(__name__)
a : Union[str, Any] = {
"""google/pix2struct-textcaps-base""": (
"""https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"""
),
}
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = "pix2struct_text_model"
__lowerCamelCase = ["past_key_values"]
__lowerCamelCase = {
"hidden_size": "hidden_size",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , snake_case__=50244 , snake_case__=768 , snake_case__=64 , snake_case__=2048 , snake_case__=12 , snake_case__=12 , snake_case__=32 , snake_case__=128 , snake_case__=0.1 , snake_case__=1e-6 , snake_case__=1.0 , snake_case__="gelu_new" , snake_case__=0 , snake_case__=False , snake_case__=0 , snake_case__=1 , snake_case__=False , snake_case__=True , **snake_case__ , ):
'''simple docstring'''
lowercase__ : int= vocab_size
lowercase__ : Optional[Any]= hidden_size
lowercase__ : Tuple= d_kv
lowercase__ : Optional[int]= d_ff
lowercase__ : Any= num_layers
lowercase__ : Dict= num_heads
lowercase__ : List[Any]= relative_attention_num_buckets
lowercase__ : Optional[Any]= relative_attention_max_distance
lowercase__ : Dict= dropout_rate
lowercase__ : Tuple= layer_norm_epsilon
lowercase__ : str= initializer_factor
lowercase__ : Any= use_cache
lowercase__ : Optional[int]= eos_token_id
lowercase__ : str= decoder_start_token_id
# for backwards compatibility
lowercase__ : Optional[Any]= dense_act_fn
super().__init__(
pad_token_id=snake_case__ , eos_token_id=snake_case__ , decoder_start_token_id=snake_case__ , tie_word_embeddings=snake_case__ , is_decoder=snake_case__ , **snake_case__ , )
@classmethod
def UpperCAmelCase_ ( cls , snake_case__ , **snake_case__ ):
'''simple docstring'''
cls._set_token_in_kwargs(snake_case__ )
lowercase__, lowercase__ : str= cls.get_config_dict(snake_case__ , **snake_case__ )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get("model_type" ) == "pix2struct":
lowercase__ : str= config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(snake_case__ , **snake_case__ )
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = "pix2struct_vision_model"
def __init__( self , snake_case__=768 , snake_case__=768 , snake_case__=2048 , snake_case__=64 , snake_case__=12 , snake_case__=12 , snake_case__="gelu_new" , snake_case__=1e-6 , snake_case__=0.0 , snake_case__=0.0 , snake_case__=1e-10 , snake_case__=1.0 , snake_case__=4096 , snake_case__=32 , snake_case__=128 , **snake_case__ , ):
'''simple docstring'''
super().__init__(**snake_case__ )
lowercase__ : Tuple= hidden_size
lowercase__ : Tuple= patch_embed_hidden_size
lowercase__ : Optional[Any]= d_ff
lowercase__ : Dict= dropout_rate
lowercase__ : Any= num_hidden_layers
lowercase__ : Optional[int]= num_attention_heads
lowercase__ : Dict= initializer_range
lowercase__ : Tuple= initializer_factor
lowercase__ : Tuple= attention_dropout
lowercase__ : Optional[Any]= layer_norm_eps
lowercase__ : List[Any]= dense_act_fn
lowercase__ : str= seq_len
lowercase__ : List[str]= relative_attention_num_buckets
lowercase__ : Union[str, Any]= relative_attention_max_distance
lowercase__ : Dict= d_kv
@classmethod
def UpperCAmelCase_ ( cls , snake_case__ , **snake_case__ ):
'''simple docstring'''
cls._set_token_in_kwargs(snake_case__ )
lowercase__, lowercase__ : int= cls.get_config_dict(snake_case__ , **snake_case__ )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get("model_type" ) == "pix2struct":
lowercase__ : Union[str, Any]= config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(snake_case__ , **snake_case__ )
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = "pix2struct"
__lowerCamelCase = True
def __init__( self , snake_case__=None , snake_case__=None , snake_case__=1.0 , snake_case__=0.02 , snake_case__=False , snake_case__=False , snake_case__=True , **snake_case__ , ):
'''simple docstring'''
super().__init__(tie_word_embeddings=snake_case__ , is_encoder_decoder=snake_case__ , **snake_case__ )
if text_config is None:
lowercase__ : List[Any]= {}
logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values." )
if vision_config is None:
lowercase__ : str= {}
logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values." )
lowercase__ : str= PixaStructTextConfig(**snake_case__ )
lowercase__ : Dict= PixaStructVisionConfig(**snake_case__ )
lowercase__ : int= self.text_config.decoder_start_token_id
lowercase__ : List[Any]= self.text_config.pad_token_id
lowercase__ : Any= self.text_config.eos_token_id
lowercase__ : Any= initializer_factor
lowercase__ : int= initializer_range
lowercase__ : List[str]= self.initializer_range
lowercase__ : List[str]= self.initializer_range
lowercase__ : Dict= is_vqa
@classmethod
def UpperCAmelCase_ ( cls , snake_case__ , snake_case__ , **snake_case__ ):
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Union[str, Any]= copy.deepcopy(self.__dict__ )
lowercase__ : str= self.text_config.to_dict()
lowercase__ : str= self.vision_config.to_dict()
lowercase__ : List[str]= self.__class__.model_type
return output
| 85 | 1 |
"""simple docstring"""
from math import pow
def lowercase__(A , A , A , A , A , ) ->tuple[int, int]:
"""simple docstring"""
if current_sum == needed_sum:
# If the sum of the powers is equal to needed_sum, then we have a solution.
solutions_count += 1
return current_sum, solutions_count
lowercase__ : int= int(pow(A , A ) )
if current_sum + i_to_n <= needed_sum:
# If the sum of the powers is less than needed_sum, then continue adding powers.
current_sum += i_to_n
lowercase__, lowercase__ : Tuple= backtrack(
A , A , current_number + 1 , A , A )
current_sum -= i_to_n
if i_to_n < needed_sum:
# If the power of i is less than needed_sum, then try with the next power.
lowercase__, lowercase__ : str= backtrack(
A , A , current_number + 1 , A , A )
return current_sum, solutions_count
def lowercase__(A , A ) ->int:
"""simple docstring"""
if not (1 <= needed_sum <= 1_000 and 2 <= power <= 10):
raise ValueError(
"Invalid input\n"
"needed_sum must be between 1 and 1000, power between 2 and 10." )
return backtrack(A , A , 1 , 0 , 0 )[1] # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 85 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class __UpperCAmelCase( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : str= TFAutoModelForSeqaSeqLM.from_pretrained("google/mt5-small" )
lowercase__ : str= AutoTokenizer.from_pretrained("google/mt5-small" )
lowercase__ : Tuple= tokenizer("Hello there" , return_tensors="tf" ).input_ids
lowercase__ : Optional[Any]= tokenizer("Hi I am" , return_tensors="tf" ).input_ids
lowercase__ : Optional[Any]= model(snake_case__ , labels=snake_case__ ).loss
lowercase__ : int= -tf.math.reduce_mean(snake_case__ ).numpy()
lowercase__ : int= -21.22_81_68
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2e-4 )
| 85 | 1 |
"""simple docstring"""
def lowercase__(A = 600_851_475_143 ) ->int:
"""simple docstring"""
try:
lowercase__ : List[Any]= int(A )
except (TypeError, ValueError):
raise TypeError("Parameter n must be int or castable to int." )
if n <= 0:
raise ValueError("Parameter n must be greater than or equal to one." )
lowercase__ : List[Any]= 2
lowercase__ : Dict= 0
if n == 2:
return 2
while n > 2:
while n % i != 0:
i += 1
lowercase__ : Union[str, Any]= i
while n % i == 0:
lowercase__ : Optional[int]= n // i
i += 1
return int(A )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 85 |
"""simple docstring"""
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = ["image_processor", "tokenizer"]
__lowerCamelCase = "BridgeTowerImageProcessor"
__lowerCamelCase = ("RobertaTokenizer", "RobertaTokenizerFast")
def __init__( self , snake_case__ , snake_case__ ):
'''simple docstring'''
super().__init__(snake_case__ , snake_case__ )
def __call__( self , snake_case__ , snake_case__ = None , snake_case__ = True , snake_case__ = False , snake_case__ = None , snake_case__ = None , snake_case__ = 0 , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = False , snake_case__ = False , snake_case__ = False , snake_case__ = False , snake_case__ = True , snake_case__ = None , **snake_case__ , ):
'''simple docstring'''
lowercase__ : Optional[int]= self.tokenizer(
text=snake_case__ , add_special_tokens=snake_case__ , padding=snake_case__ , truncation=snake_case__ , max_length=snake_case__ , stride=snake_case__ , pad_to_multiple_of=snake_case__ , return_token_type_ids=snake_case__ , return_attention_mask=snake_case__ , return_overflowing_tokens=snake_case__ , return_special_tokens_mask=snake_case__ , return_offsets_mapping=snake_case__ , return_length=snake_case__ , verbose=snake_case__ , return_tensors=snake_case__ , **snake_case__ , )
# add pixel_values + pixel_mask
lowercase__ : Optional[int]= self.image_processor(
snake_case__ , return_tensors=snake_case__ , do_normalize=snake_case__ , do_center_crop=snake_case__ , **snake_case__ )
encoding.update(snake_case__ )
return encoding
def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ )
def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
return self.tokenizer.decode(*snake_case__ , **snake_case__ )
@property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.tokenizer.model_input_names
lowercase__ : List[Any]= self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 85 | 1 |
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class __UpperCAmelCase( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[str]= load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
lowercase__ : Optional[Any]= load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
lowercase__ : Tuple= "xvjiarui/stable-diffusion-2-inpainting"
lowercase__, lowercase__ : Tuple= FlaxStableDiffusionInpaintPipeline.from_pretrained(snake_case__ , safety_checker=snake_case__ )
lowercase__ : Any= "Face of a yellow cat, high resolution, sitting on a park bench"
lowercase__ : List[Any]= jax.random.PRNGKey(0 )
lowercase__ : List[Any]= 50
lowercase__ : Any= jax.device_count()
lowercase__ : int= num_samples * [prompt]
lowercase__ : int= num_samples * [init_image]
lowercase__ : Tuple= num_samples * [mask_image]
lowercase__, lowercase__, lowercase__ : List[str]= pipeline.prepare_inputs(snake_case__ , snake_case__ , snake_case__ )
# shard inputs and rng
lowercase__ : Any= replicate(snake_case__ )
lowercase__ : str= jax.random.split(snake_case__ , jax.device_count() )
lowercase__ : Dict= shard(snake_case__ )
lowercase__ : Tuple= shard(snake_case__ )
lowercase__ : Optional[int]= shard(snake_case__ )
lowercase__ : int= pipeline(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , jit=snake_case__ )
lowercase__ : Dict= output.images.reshape(snake_case__ , 512 , 512 , 3 )
lowercase__ : List[Any]= images[0, 253:256, 253:256, -1]
lowercase__ : Dict= jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowercase__ : Any= jnp.array(
[0.3_61_13_07, 0.37_64_97_36, 0.3_75_74_08, 0.38_21_39_53, 0.39_29_51_67, 0.3_84_16_31, 0.41_55_49_78, 0.4_13_74_75, 0.4_21_70_84] )
print(F'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 85 |
"""simple docstring"""
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= tempfile.mkdtemp()
lowercase__ : Optional[Any]= 8
# DPR tok
lowercase__ : Tuple= [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
lowercase__ : Any= os.path.join(self.tmpdirname , "dpr_tokenizer" )
os.makedirs(snake_case__ , exist_ok=snake_case__ )
lowercase__ : Any= os.path.join(snake_case__ , DPR_VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
# BART tok
lowercase__ : List[Any]= [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
lowercase__ : Tuple= dict(zip(snake_case__ , range(len(snake_case__ ) ) ) )
lowercase__ : Any= ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
lowercase__ : Tuple= {"unk_token": "<unk>"}
lowercase__ : int= os.path.join(self.tmpdirname , "bart_tokenizer" )
os.makedirs(snake_case__ , exist_ok=snake_case__ )
lowercase__ : List[str]= os.path.join(snake_case__ , BART_VOCAB_FILES_NAMES["vocab_file"] )
lowercase__ : str= os.path.join(snake_case__ , BART_VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(snake_case__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(snake_case__ ) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , "bart_tokenizer" ) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= Dataset.from_dict(
{
"id": ["0", "1"],
"text": ["foo", "bar"],
"title": ["Foo", "Bar"],
"embeddings": [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Union[str, Any]= self.get_dummy_dataset()
lowercase__ : Optional[Any]= RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset:
lowercase__ : Tuple= dataset
lowercase__ : Optional[int]= RagRetriever(
snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
lowercase__ : Dict= self.get_dummy_dataset()
lowercase__ : Tuple= RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="custom" , )
if from_disk:
lowercase__ : Tuple= os.path.join(self.tmpdirname , "dataset" )
lowercase__ : Optional[Any]= os.path.join(self.tmpdirname , "index.faiss" )
dataset.get_index("embeddings" ).save(os.path.join(self.tmpdirname , "index.faiss" ) )
dataset.drop_index("embeddings" )
dataset.save_to_disk(os.path.join(self.tmpdirname , "dataset" ) )
del dataset
lowercase__ : List[Any]= RagRetriever(
snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
lowercase__ : Optional[int]= RagRetriever(
snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , snake_case__ ) , )
return retriever
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[str]= Dataset.from_dict(
{
"id": ["0", "1"],
"text": ["foo", "bar"],
"title": ["Foo", "Bar"],
"embeddings": [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT )
lowercase__ : Optional[int]= os.path.join(self.tmpdirname , "hf_bert_base.hnswSQ8_correct_phi_128.c_index" )
dataset.save_faiss_index("embeddings" , index_file_name + ".index.dpr" )
pickle.dump(dataset["id"] , open(index_file_name + ".index_meta.dpr" , "wb" ) )
lowercase__ : int= os.path.join(self.tmpdirname , "psgs_w100.tsv.pkl" )
lowercase__ : str= {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
pickle.dump(snake_case__ , open(snake_case__ , "wb" ) )
lowercase__ : List[Any]= RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="legacy" , index_path=self.tmpdirname , )
lowercase__ : Optional[Any]= RagRetriever(
snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= 1
lowercase__ : Optional[Any]= self.get_dummy_canonical_hf_index_retriever()
lowercase__ : Union[str, Any]= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__, lowercase__, lowercase__ : Optional[int]= retriever.retrieve(snake_case__ , n_docs=snake_case__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(snake_case__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , snake_case__ )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset:
lowercase__ : Tuple= self.get_dummy_dataset()
retriever.save_pretrained(snake_case__ )
lowercase__ : int= RagRetriever.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
lowercase__ : Any= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : Tuple= retriever.retrieve(snake_case__ , n_docs=1 )
self.assertTrue(out is not None )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[Any]= 1
lowercase__ : Any= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ )
lowercase__ : Union[str, Any]= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__, lowercase__, lowercase__ : Any= retriever.retrieve(snake_case__ , n_docs=snake_case__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(snake_case__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , snake_case__ )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(snake_case__ )
lowercase__ : int= RagRetriever.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
lowercase__ : Tuple= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : str= retriever.retrieve(snake_case__ , n_docs=1 )
self.assertTrue(out is not None )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Any= 1
lowercase__ : str= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ )
lowercase__ : List[str]= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__, lowercase__, lowercase__ : Optional[int]= retriever.retrieve(snake_case__ , n_docs=snake_case__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(snake_case__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , snake_case__ )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Tuple= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(snake_case__ )
lowercase__ : Optional[Any]= RagRetriever.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
lowercase__ : int= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : Union[str, Any]= retriever.retrieve(snake_case__ , n_docs=1 )
self.assertTrue(out is not None )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= 1
lowercase__ : int= self.get_dummy_legacy_index_retriever()
lowercase__ : Optional[Any]= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__, lowercase__, lowercase__ : Optional[Any]= retriever.retrieve(snake_case__ , n_docs=snake_case__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(snake_case__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["text", "title"] )
self.assertEqual(len(doc_dicts[0]["text"] ) , snake_case__ )
self.assertEqual(doc_dicts[0]["text"][0] , "bar" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["text"][0] , "foo" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[int]= self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(snake_case__ )
lowercase__ : List[Any]= RagRetriever.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
lowercase__ : str= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : Tuple= retriever.retrieve(snake_case__ , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def UpperCAmelCase_ ( self ):
'''simple docstring'''
import torch
lowercase__ : str= 1
lowercase__ : Union[str, Any]= self.get_dummy_canonical_hf_index_retriever()
lowercase__ : str= [[5, 7], [10, 11]]
lowercase__ : List[str]= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : Dict= retriever(snake_case__ , snake_case__ , prefix=retriever.config.generator.prefix , n_docs=snake_case__ )
lowercase__, lowercase__, lowercase__ : Optional[int]= (
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(snake_case__ , snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
self.assertIsInstance(snake_case__ , np.ndarray )
lowercase__ : Any= retriever(
snake_case__ , snake_case__ , prefix=retriever.config.generator.prefix , n_docs=snake_case__ , return_tensors="pt" , )
lowercase__, lowercase__, lowercase__, lowercase__ : Tuple= ( # noqa: F841
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
out["doc_ids"],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(snake_case__ , torch.Tensor )
self.assertIsInstance(snake_case__ , torch.Tensor )
self.assertIsInstance(snake_case__ , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[str]= self.get_dpr_ctx_encoder_tokenizer()
lowercase__ : Dict= 1
lowercase__ : Any= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ )
retriever.set_ctx_encoder_tokenizer(snake_case__ )
lowercase__ : List[str]= [[5, 7], [10, 11]]
lowercase__ : Any= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : List[Any]= retriever(snake_case__ , snake_case__ , prefix=retriever.config.generator.prefix , n_docs=snake_case__ )
self.assertEqual(
len(snake_case__ ) , 6 ) # check whether the retriever output consists of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask") ) , snake_case__ ) # check for doc token related keys in dictionary.
| 85 | 1 |
"""simple docstring"""
a : str = """0.21.0"""
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 85 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = ["image_processor", "tokenizer"]
__lowerCamelCase = "AutoImageProcessor"
__lowerCamelCase = "AutoTokenizer"
def __init__( self , snake_case__ , snake_case__ ):
'''simple docstring'''
super().__init__(snake_case__ , snake_case__ )
lowercase__ : List[Any]= self.image_processor
def __call__( self , snake_case__=None , snake_case__=None , snake_case__=None , **snake_case__ ):
'''simple docstring'''
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
lowercase__ : Tuple= self.tokenizer(snake_case__ , return_tensors=snake_case__ , **snake_case__ )
if images is not None:
lowercase__ : str= self.image_processor(snake_case__ , return_tensors=snake_case__ , **snake_case__ )
if text is not None and images is not None:
lowercase__ : Any= image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**snake_case__ ) , tensor_type=snake_case__ )
def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ )
def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
return self.tokenizer.decode(*snake_case__ , **snake_case__ )
@property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return ["input_ids", "attention_mask", "pixel_values"]
| 85 | 1 |
"""simple docstring"""
def lowercase__(A ) ->str:
"""simple docstring"""
return "".join(chr(ord(A ) - 32 ) if "a" <= char <= "z" else char for char in word )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 85 |
"""simple docstring"""
a : List[Any] = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"""
def lowercase__(A ) ->bytes:
"""simple docstring"""
if not isinstance(A , A ):
lowercase__ : Union[str, Any]= f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(A )
lowercase__ : str= "".join(bin(A )[2:].zfill(8 ) for byte in data )
lowercase__ : Tuple= len(A ) % 6 != 0
if padding_needed:
# The padding that will be added later
lowercase__ : Union[str, Any]= b"=" * ((6 - len(A ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(A ) % 6)
else:
lowercase__ : str= b""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(A ) , 6 ) ).encode()
+ padding
)
def lowercase__(A ) ->bytes:
"""simple docstring"""
if not isinstance(A , A ) and not isinstance(A , A ):
lowercase__ : str= (
"argument should be a bytes-like object or ASCII string, "
f'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(A )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(A , A ):
try:
lowercase__ : Optional[Any]= encoded_data.decode("utf-8" )
except UnicodeDecodeError:
raise ValueError("base64 encoded data should only contain ASCII characters" )
lowercase__ : List[Any]= encoded_data.count("=" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(A ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
lowercase__ : str= encoded_data[:-padding]
lowercase__ : Tuple= "".join(
bin(B64_CHARSET.index(A ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
lowercase__ : Tuple= "".join(
bin(B64_CHARSET.index(A ) )[2:].zfill(6 ) for char in encoded_data )
lowercase__ : Any= [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(A ) , 8 )
]
return bytes(A )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 85 | 1 |
"""simple docstring"""
def lowercase__(A ) ->list:
"""simple docstring"""
lowercase__ : List[str]= len(A )
for _ in range(A ):
for i in range(_ % 2 , arr_size - 1 , 2 ):
if arr[i + 1] < arr[i]:
lowercase__, lowercase__ : Optional[int]= arr[i + 1], arr[i]
return arr
if __name__ == "__main__":
a : Union[str, Any] = list(range(10, 0, -1))
print(F"""Original: {arr}. Sorted: {odd_even_transposition(arr)}""")
| 85 |
"""simple docstring"""
from __future__ import annotations
def lowercase__(A ) ->list[int]: # This function is recursive
"""simple docstring"""
lowercase__ : int= len(A )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
lowercase__ : str= array[0]
lowercase__ : Optional[Any]= False
lowercase__ : Any= 1
lowercase__ : list[int]= []
while not is_found and i < array_length:
if array[i] < pivot:
lowercase__ : Union[str, Any]= True
lowercase__ : List[str]= [element for element in array[i:] if element >= array[i]]
lowercase__ : Union[str, Any]= longest_subsequence(A )
if len(A ) > len(A ):
lowercase__ : List[str]= temp_array
else:
i += 1
lowercase__ : List[str]= [element for element in array[1:] if element >= pivot]
lowercase__ : List[str]= [pivot, *longest_subsequence(A )]
if len(A ) > len(A ):
return temp_array
else:
return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
| 85 | 1 |
"""simple docstring"""
from __future__ import annotations
def lowercase__(A ) ->bool:
"""simple docstring"""
if len(A ) < 2:
raise ValueError("Monogons and Digons are not polygons in the Euclidean space" )
if any(i <= 0 for i in nums ):
raise ValueError("All values must be greater than 0" )
lowercase__ : Tuple= nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 85 |
"""simple docstring"""
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
a : int = argparse.ArgumentParser()
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--txt2img_unclip""",
default="""kakaobrain/karlo-v1-alpha""",
type=str,
required=False,
help="""The pretrained txt2img unclip.""",
)
a : List[str] = parser.parse_args()
a : List[str] = UnCLIPPipeline.from_pretrained(args.txtaimg_unclip)
a : Optional[Any] = CLIPImageProcessor()
a : List[str] = CLIPVisionModelWithProjection.from_pretrained("""openai/clip-vit-large-patch14""")
a : Tuple = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
| 85 | 1 |
"""simple docstring"""
import torch
from diffusers import DiffusionPipeline
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__ ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=snake_case__ , scheduler=snake_case__ )
def __call__( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= torch.randn(
(1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , )
lowercase__ : List[str]= 1
lowercase__ : int= self.unet(snake_case__ , snake_case__ ).sample
lowercase__ : Optional[Any]= self.scheduler.step(snake_case__ , snake_case__ , snake_case__ ).prev_sample
lowercase__ : List[Any]= scheduler_output - scheduler_output + torch.ones_like(snake_case__ )
return result
| 85 |
"""simple docstring"""
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
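# Each entry maps a model type to its config class, TensorFlow model class(es), PyTorch model
# class(es) and pretrained archive map/list; the tuple length varies by architecture.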
MODEL_CLASSES = {
"""bart""": (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""bert""": (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-base-cased-finetuned-mrpc""": (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""dpr""": (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""gpt2""": (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlnet""": (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlm""": (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlm-roberta""": (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""transfo-xl""": (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""openai-gpt""": (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""roberta""": (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""layoutlm""": (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""roberta-large-mnli""": (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""camembert""": (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""flaubert""": (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""distilbert""": (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""distilbert-base-distilled-squad""": (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""lxmert""": (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""lxmert-visual-feature-encoder""": (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""ctrl""": (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""albert""": (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""t5""": (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""electra""": (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""wav2vec2""": (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def convert_pt_checkpoint_to_tf(model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True):
"""simple docstring"""
if model_type not in MODEL_CLASSES:
raise ValueError(f'''Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.''' )
    config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]
# Initialise TF model
if config_file in aws_config_map:
lowercase__ : List[str]= cached_file(A , A , force_download=not use_cached_models )
lowercase__ : List[Any]= config_class.from_json_file(A )
lowercase__ : Any= True
lowercase__ : List[str]= True
print(f'''Building TensorFlow model from configuration: {config}''' )
lowercase__ : Optional[int]= model_class(A )
# Load weights from tf checkpoint
if pytorch_checkpoint_path in aws_config_map.keys():
lowercase__ : List[str]= cached_file(
A , A , force_download=not use_cached_models )
# Load PyTorch checkpoint in tf2 model:
lowercase__ : Union[str, Any]= load_pytorch_checkpoint_in_tfa_model(A , A )
if compare_with_pt_model:
lowercase__ : Any= tf_model(tf_model.dummy_inputs , training=A ) # build the network
lowercase__ : Optional[Any]= torch.load(A , map_location="cpu" )
lowercase__ : Union[str, Any]= pt_model_class.from_pretrained(
pretrained_model_name_or_path=A , config=A , state_dict=A )
with torch.no_grad():
lowercase__ : str= pt_model(**pt_model.dummy_inputs )
lowercase__ : Tuple= pto[0].numpy()
lowercase__ : List[Any]= tfo[0].numpy()
lowercase__ : Any= np.amax(np.abs(np_pt - np_tf ) )
print(f'''Max absolute difference between models outputs {diff}''' )
assert diff <= 2e-2, f'''Error, model absolute difference is >2e-2: {diff}'''
# Save pytorch-model
print(f'''Save TensorFlow model to {tf_dump_path}''' )
tf_model.save_weights(A , save_format="h5" )
def convert_all_pt_checkpoints_to_tf(args_model_type, tf_dump_path, model_shortcut_names_or_path=None, config_shortcut_names_or_path=None, compare_with_pt_model=False, use_cached_models=False, remove_cached_files=False, only_convert_finetuned_models=False):
"""simple docstring"""
if args_model_type is None:
lowercase__ : Tuple= list(MODEL_CLASSES.keys() )
else:
lowercase__ : Optional[int]= [args_model_type]
for j, model_type in enumerate(A , start=1 ):
print("=" * 100 )
print(f''' Converting model type {j}/{len(A )}: {model_type}''' )
print("=" * 100 )
if model_type not in MODEL_CLASSES:
raise ValueError(f'''Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.''' )
        config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]
if model_shortcut_names_or_path is None:
lowercase__ : int= list(aws_model_maps.keys() )
if config_shortcut_names_or_path is None:
lowercase__ : Any= model_shortcut_names_or_path
for i, (model_shortcut_name, config_shortcut_name) in enumerate(
zip(A , A ) , start=1 ):
print("-" * 100 )
if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
if not only_convert_finetuned_models:
print(f''' Skipping finetuned checkpoint {model_shortcut_name}''' )
continue
lowercase__ : Any= model_shortcut_name
elif only_convert_finetuned_models:
print(f''' Skipping not finetuned checkpoint {model_shortcut_name}''' )
continue
print(
f''' Converting checkpoint {i}/{len(A )}: {model_shortcut_name} - model_type {model_type}''' )
print("-" * 100 )
if config_shortcut_name in aws_config_map:
lowercase__ : List[str]= cached_file(A , A , force_download=not use_cached_models )
else:
lowercase__ : Union[str, Any]= config_shortcut_name
if model_shortcut_name in aws_model_maps:
lowercase__ : str= cached_file(A , A , force_download=not use_cached_models )
else:
lowercase__ : Any= model_shortcut_name
if os.path.isfile(A ):
lowercase__ : Dict= "converted_model"
convert_pt_checkpoint_to_tf(
model_type=A , pytorch_checkpoint_path=A , config_file=A , tf_dump_path=os.path.join(A , model_shortcut_name + "-tf_model.h5" ) , compare_with_pt_model=A , )
if remove_cached_files:
os.remove(A )
os.remove(A )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_dump_path""", default=None, type=str, required=True, help="""Path to the output Tensorflow dump file."""
)
parser.add_argument(
"""--model_type""",
default=None,
type=str,
help=(
F"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """
"""convert all the models from AWS."""
),
)
parser.add_argument(
"""--pytorch_checkpoint_path""",
default=None,
type=str,
help=(
"""Path to the PyTorch checkpoint path or shortcut name to download from AWS. """
"""If not given, will download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
help=(
"""The config json file corresponding to the pre-trained model. \n"""
"""This specifies the model architecture. If not given and """
"""--pytorch_checkpoint_path is not given or is a shortcut name """
"""use the configuration associated to the shortcut name on the AWS"""
),
)
parser.add_argument(
"""--compare_with_pt_model""", action="""store_true""", help="""Compare Tensorflow and PyTorch model predictions."""
)
parser.add_argument(
"""--use_cached_models""",
action="""store_true""",
help="""Use cached models if possible instead of updating to latest checkpoint versions.""",
)
parser.add_argument(
"""--remove_cached_files""",
action="""store_true""",
help="""Remove pytorch models after conversion (save memory when converting in batches).""",
)
parser.add_argument("""--only_convert_finetuned_models""", action="""store_true""", help="""Only convert finetuned models.""")
    args = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
| 85 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : int = logging.get_logger(__name__)
a : Tuple = {
"""uw-madison/mra-base-512-4""": """https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json""",
}
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = "mra"
def __init__( self , snake_case__=50265 , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=1 , snake_case__=0.02 , snake_case__=1e-5 , snake_case__="absolute" , snake_case__=4 , snake_case__="full" , snake_case__=0 , snake_case__=0 , snake_case__=1 , snake_case__=0 , snake_case__=2 , **snake_case__ , ):
'''simple docstring'''
super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
lowercase__ : int= vocab_size
lowercase__ : str= max_position_embeddings
lowercase__ : Optional[Any]= hidden_size
lowercase__ : Optional[Any]= num_hidden_layers
lowercase__ : List[Any]= num_attention_heads
lowercase__ : str= intermediate_size
lowercase__ : List[str]= hidden_act
lowercase__ : Tuple= hidden_dropout_prob
lowercase__ : List[str]= attention_probs_dropout_prob
lowercase__ : Any= initializer_range
lowercase__ : List[str]= type_vocab_size
lowercase__ : Tuple= layer_norm_eps
lowercase__ : int= position_embedding_type
lowercase__ : Dict= block_per_row
lowercase__ : List[Any]= approx_mode
lowercase__ : Any= initial_prior_first_n_blocks
lowercase__ : Dict= initial_prior_diagonal_n_blocks
| 85 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
a : List[str] = {"""processing_wav2vec2_with_lm""": ["""Wav2Vec2ProcessorWithLM"""]}
if TYPE_CHECKING:
from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
import sys
a : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 85 | 1 |
"""simple docstring"""
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    """simple docstring"""
    # Sieve of Eratosthenes restricted to odd candidates above 2.
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes
def solution(ceiling: int = 1_000_000) -> int:
    """simple docstring"""
    # Find the prime below `ceiling` that is the sum of the longest run of consecutive primes.
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0
    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
if __name__ == "__main__":
print(F"""{solution() = }""")
| 85 |
"""simple docstring"""
def harmonic_series(n_term) -> list:
    """simple docstring"""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f'''1/{temp + 1}''' if series else "1")
    return series
if __name__ == "__main__":
    nth_term = input("""Enter the last number (nth term) of the Harmonic Series""")
print("""Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n""")
print(harmonic_series(nth_term))
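    # Illustrative result: harmonic_series("4") returns ["1", "1/2", "1/3", "1/4"]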
| 85 | 1 |
"""simple docstring"""
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("""9.1.0"""):
a : Optional[Any] = {
"""linear""": PIL.Image.Resampling.BILINEAR,
"""bilinear""": PIL.Image.Resampling.BILINEAR,
"""bicubic""": PIL.Image.Resampling.BICUBIC,
"""lanczos""": PIL.Image.Resampling.LANCZOS,
"""nearest""": PIL.Image.Resampling.NEAREST,
}
else:
a : Dict = {
"""linear""": PIL.Image.LINEAR,
"""bilinear""": PIL.Image.BILINEAR,
"""bicubic""": PIL.Image.BICUBIC,
"""lanczos""": PIL.Image.LANCZOS,
"""nearest""": PIL.Image.NEAREST,
}
def lowercase__(A ) ->Union[str, Any]:
"""simple docstring"""
lowercase__ : int= (images / 2 + 0.5).clamp(0 , 1 )
lowercase__ : int= images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
lowercase__ : List[Any]= numpy_to_pil(A )
return images
def lowercase__(A ) ->List[str]:
"""simple docstring"""
if images.ndim == 3:
lowercase__ : List[Any]= images[None, ...]
lowercase__ : str= (images * 255).round().astype("uint8" )
if images.shape[-1] == 1:
# special case for grayscale (single channel) images
lowercase__ : str= [Image.fromarray(image.squeeze() , mode="L" ) for image in images]
else:
lowercase__ : List[str]= [Image.fromarray(A ) for image in images]
return pil_images
| 85 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a : int = logging.get_logger(__name__)
a : str = {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""",
"""google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""",
"""google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = "big_bird"
def __init__( self , snake_case__=50358 , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__="gelu_new" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=4096 , snake_case__=2 , snake_case__=0.02 , snake_case__=1e-12 , snake_case__=True , snake_case__=0 , snake_case__=1 , snake_case__=2 , snake_case__=66 , snake_case__="block_sparse" , snake_case__=True , snake_case__=False , snake_case__=64 , snake_case__=3 , snake_case__=None , **snake_case__ , ):
'''simple docstring'''
super().__init__(
pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , sep_token_id=snake_case__ , **snake_case__ , )
lowercase__ : Dict= vocab_size
lowercase__ : Optional[int]= max_position_embeddings
lowercase__ : List[Any]= hidden_size
lowercase__ : List[str]= num_hidden_layers
lowercase__ : List[str]= num_attention_heads
lowercase__ : Optional[int]= intermediate_size
lowercase__ : Optional[int]= hidden_act
lowercase__ : Tuple= hidden_dropout_prob
lowercase__ : int= attention_probs_dropout_prob
lowercase__ : int= initializer_range
lowercase__ : List[Any]= type_vocab_size
lowercase__ : Union[str, Any]= layer_norm_eps
lowercase__ : Optional[Any]= use_cache
lowercase__ : Union[str, Any]= rescale_embeddings
lowercase__ : Union[str, Any]= attention_type
lowercase__ : Any= use_bias
lowercase__ : List[Any]= block_size
lowercase__ : Optional[Any]= num_random_blocks
lowercase__ : Optional[int]= classifier_dropout
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
@property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
if self.task == "multiple-choice":
lowercase__ : List[Any]= {0: "batch", 1: "choice", 2: "sequence"}
else:
lowercase__ : Tuple= {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 85 | 1 |
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    """simple docstring"""
    # Randomised quicksort on a[start:end + 1]; returns the number of comparisons performed.
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp
        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count
def _in_place_partition(a, start, end):
    """simple docstring"""
    # Lomuto-style partition around a randomly chosen pivot; returns (pivot position, comparison count).
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp
    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("""The array is""")
print(X)
outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
"""No of Comparisons for 100 elements selected from a standard normal distribution"""
"""is :"""
)
print(z)
| 85 |
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 85 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
a : str = logging.get_logger(__name__)
def lowercase__(A , A=False ) ->List[Any]:
"""simple docstring"""
lowercase__ : Dict= []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''deit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''deit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''deit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''deit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''deit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''deit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''deit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''deit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''deit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''deit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "deit.embeddings.cls_token"),
("dist_token", "deit.embeddings.distillation_token"),
("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "deit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
lowercase__ : Optional[Any]= [(pair[0], pair[1][4:]) if pair[1].startswith("deit" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("norm.weight", "deit.layernorm.weight"),
("norm.bias", "deit.layernorm.bias"),
("head.weight", "cls_classifier.weight"),
("head.bias", "cls_classifier.bias"),
("head_dist.weight", "distillation_classifier.weight"),
("head_dist.bias", "distillation_classifier.bias"),
] )
return rename_keys
def lowercase__(A , A , A=False ) ->int:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
lowercase__ : Tuple= ""
else:
lowercase__ : int= "deit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowercase__ : List[Any]= state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
lowercase__ : Optional[Any]= state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
lowercase__ : Dict= in_proj_weight[
: config.hidden_size, :
]
lowercase__ : int= in_proj_bias[: config.hidden_size]
lowercase__ : List[Any]= in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase__ : List[str]= in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowercase__ : Union[str, Any]= in_proj_weight[
-config.hidden_size :, :
]
lowercase__ : Dict= in_proj_bias[-config.hidden_size :]
def lowercase__(A , A , A ) ->int:
"""simple docstring"""
lowercase__ : Optional[Any]= dct.pop(A )
lowercase__ : List[Any]= val
def lowercase__() ->Tuple:
"""simple docstring"""
lowercase__ : List[Any]= "http://images.cocodataset.org/val2017/000000039769.jpg"
lowercase__ : Any= Image.open(requests.get(A , stream=A ).raw )
return im
@torch.no_grad()
def lowercase__(A , A ) ->int:
"""simple docstring"""
lowercase__ : List[str]= DeiTConfig()
# all deit models have fine-tuned heads
lowercase__ : List[Any]= False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
lowercase__ : Dict= 1_000
lowercase__ : Optional[Any]= "huggingface/label-files"
lowercase__ : Dict= "imagenet-1k-id2label.json"
lowercase__ : Any= json.load(open(hf_hub_download(A , A , repo_type="dataset" ) , "r" ) )
lowercase__ : Any= {int(A ): v for k, v in idalabel.items()}
lowercase__ : List[Any]= idalabel
lowercase__ : Any= {v: k for k, v in idalabel.items()}
lowercase__ : Optional[int]= int(deit_name[-6:-4] )
lowercase__ : Dict= int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith("tiny" ):
lowercase__ : str= 192
lowercase__ : int= 768
lowercase__ : Dict= 12
lowercase__ : Optional[Any]= 3
elif deit_name[9:].startswith("small" ):
lowercase__ : Dict= 384
lowercase__ : Dict= 1_536
lowercase__ : int= 12
lowercase__ : Any= 6
if deit_name[9:].startswith("base" ):
pass
elif deit_name[4:].startswith("large" ):
lowercase__ : Any= 1_024
lowercase__ : Optional[Any]= 4_096
lowercase__ : List[str]= 24
lowercase__ : str= 16
# load original model from timm
lowercase__ : str= timm.create_model(A , pretrained=A )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
lowercase__ : Optional[Any]= timm_model.state_dict()
lowercase__ : List[Any]= create_rename_keys(A , A )
for src, dest in rename_keys:
rename_key(A , A , A )
read_in_q_k_v(A , A , A )
# load HuggingFace model
lowercase__ : Any= DeiTForImageClassificationWithTeacher(A ).eval()
model.load_state_dict(A )
# Check outputs on an image, prepared by DeiTImageProcessor
lowercase__ : Optional[Any]= int(
(256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
lowercase__ : Dict= DeiTImageProcessor(size=A , crop_size=config.image_size )
lowercase__ : Dict= image_processor(images=prepare_img() , return_tensors="pt" )
lowercase__ : List[Any]= encoding["pixel_values"]
lowercase__ : List[str]= model(A )
lowercase__ : Optional[Any]= timm_model(A )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(A , outputs.logits , atol=1e-3 )
Path(A ).mkdir(exist_ok=A )
print(f'''Saving model {deit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(A )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(A )
if __name__ == "__main__":
a : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--deit_name""",
default="""vit_deit_base_distilled_patch16_224""",
type=str,
help="""Name of the DeiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
a : Optional[Any] = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 85 |
"""simple docstring"""
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def lowercase__(A , A ) ->List[Any]:
"""simple docstring"""
lowercase__ : str= []
for part_id in partition_order:
lowercase__ : int= df.where(f'''SPARK_PARTITION_ID() = {part_id}''' ).collect()
for row_idx, row in enumerate(A ):
expected_row_ids_and_row_dicts.append((f'''{part_id}_{row_idx}''', row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def lowercase__() ->str:
"""simple docstring"""
lowercase__ : Optional[Any]= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : Tuple= spark.range(100 ).repartition(1 )
lowercase__ : Dict= Spark(A )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def lowercase__() ->Tuple:
"""simple docstring"""
lowercase__ : Union[str, Any]= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : Dict= spark.range(10 ).repartition(2 )
lowercase__ : Optional[Any]= [1, 0]
lowercase__ : List[str]= _generate_iterable_examples(A , A ) # Reverse the partitions.
lowercase__ : int= _get_expected_row_ids_and_row_dicts_for_partition_order(A , A )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
lowercase__, lowercase__ : Any= expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowercase__() ->int:
"""simple docstring"""
lowercase__ : int= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : Dict= spark.range(10 ).repartition(1 )
lowercase__ : str= SparkExamplesIterable(A )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(A ):
assert row_id == f'''0_{i}'''
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def lowercase__() ->str:
"""simple docstring"""
lowercase__ : List[str]= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : int= spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch("numpy.random.Generator" ) as generator_mock:
lowercase__ : Optional[Any]= lambda A : x.reverse()
lowercase__ : Tuple= _get_expected_row_ids_and_row_dicts_for_partition_order(A , [2, 1, 0] )
lowercase__ : List[str]= SparkExamplesIterable(A ).shuffle_data_sources(A )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(A ):
lowercase__, lowercase__ : str= expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowercase__() ->Any:
"""simple docstring"""
lowercase__ : Dict= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : Union[str, Any]= spark.range(20 ).repartition(4 )
# Partitions 0 and 2
lowercase__ : Optional[int]= SparkExamplesIterable(A ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
lowercase__ : Union[str, Any]= _get_expected_row_ids_and_row_dicts_for_partition_order(A , [0, 2] )
for i, (row_id, row_dict) in enumerate(A ):
lowercase__, lowercase__ : Tuple= expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
lowercase__ : Tuple= SparkExamplesIterable(A ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
lowercase__ : List[Any]= _get_expected_row_ids_and_row_dicts_for_partition_order(A , [1, 3] )
for i, (row_id, row_dict) in enumerate(A ):
lowercase__, lowercase__ : Dict= expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowercase__() ->Tuple:
"""simple docstring"""
lowercase__ : Any= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : Tuple= spark.range(100 ).repartition(1 )
lowercase__ : Optional[int]= Spark(A )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
| 85 | 1 |
"""simple docstring"""
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
a : List[str] = get_tests_dir("""fixtures""")
class __UpperCAmelCase( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
'''simple docstring'''
# A mock response for an HTTP head request to emulate server down
lowercase__ : Optional[int]= mock.Mock()
lowercase__ : Tuple= 500
lowercase__ : Tuple= {}
lowercase__ : Any= HTTPError
lowercase__ : Dict= {}
# Download this model to make sure it's in the cache.
lowercase__ : Tuple= WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=snake_case__ ) as mock_head:
lowercase__ : Tuple= WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" )
# This check we did call the fake head request
mock_head.assert_called()
def UpperCAmelCase_ ( self ):
'''simple docstring'''
# This test is for deprecated behavior and can be removed in v5
lowercase__ : Union[str, Any]= WavaVecaFeatureExtractor.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json" )
@is_staging_test
class __UpperCAmelCase( unittest.TestCase ):
"""simple docstring"""
@classmethod
def UpperCAmelCase_ ( cls ):
'''simple docstring'''
lowercase__ : str= TOKEN
HfFolder.save_token(snake_case__ )
@classmethod
def UpperCAmelCase_ ( cls ):
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id="test-feature-extractor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-feature-extractor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-feature-extractor" )
except HTTPError:
pass
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : str= WavaVecaFeatureExtractor.from_pretrained(snake_case__ )
feature_extractor.push_to_hub("test-feature-extractor" , use_auth_token=self._token )
lowercase__ : Optional[Any]= WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(snake_case__ , getattr(snake_case__ , snake_case__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-feature-extractor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
snake_case__ , repo_id="test-feature-extractor" , push_to_hub=snake_case__ , use_auth_token=self._token )
lowercase__ : Any= WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(snake_case__ , getattr(snake_case__ , snake_case__ ) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Union[str, Any]= WavaVecaFeatureExtractor.from_pretrained(snake_case__ )
feature_extractor.push_to_hub("valid_org/test-feature-extractor" , use_auth_token=self._token )
lowercase__ : Dict= WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(snake_case__ , getattr(snake_case__ , snake_case__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-feature-extractor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
snake_case__ , repo_id="valid_org/test-feature-extractor-org" , push_to_hub=snake_case__ , use_auth_token=self._token )
lowercase__ : Any= WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(snake_case__ , getattr(snake_case__ , snake_case__ ) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
CustomFeatureExtractor.register_for_auto_class()
lowercase__ : Tuple= CustomFeatureExtractor.from_pretrained(snake_case__ )
feature_extractor.push_to_hub("test-dynamic-feature-extractor" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map , {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"} , )
lowercase__ : int= AutoFeatureExtractor.from_pretrained(
F'''{USER}/test-dynamic-feature-extractor''' , trust_remote_code=snake_case__ )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ , "CustomFeatureExtractor" )
| 85 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__=13 , snake_case__=7 , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=False , snake_case__=False , snake_case__=False , snake_case__=2 , snake_case__=99 , snake_case__=0 , snake_case__=32 , snake_case__=5 , snake_case__=4 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=12 , snake_case__=2 , snake_case__=0.02 , snake_case__=3 , snake_case__=4 , snake_case__="last" , snake_case__=None , snake_case__=None , ):
'''simple docstring'''
lowercase__ : Optional[int]= parent
lowercase__ : Tuple= batch_size
lowercase__ : Tuple= seq_length
lowercase__ : str= is_training
lowercase__ : str= use_input_lengths
lowercase__ : Any= use_token_type_ids
lowercase__ : List[Any]= use_labels
lowercase__ : Optional[int]= gelu_activation
lowercase__ : str= sinusoidal_embeddings
lowercase__ : List[str]= causal
lowercase__ : Any= asm
lowercase__ : Optional[int]= n_langs
lowercase__ : Union[str, Any]= vocab_size
lowercase__ : int= n_special
lowercase__ : Any= hidden_size
lowercase__ : int= num_hidden_layers
lowercase__ : List[str]= num_attention_heads
lowercase__ : List[str]= hidden_dropout_prob
lowercase__ : str= attention_probs_dropout_prob
lowercase__ : Any= max_position_embeddings
lowercase__ : List[Any]= type_vocab_size
lowercase__ : int= type_sequence_label_size
lowercase__ : Any= initializer_range
lowercase__ : Optional[int]= num_labels
lowercase__ : Union[str, Any]= num_choices
lowercase__ : List[Any]= summary_type
lowercase__ : Optional[int]= use_proj
lowercase__ : int= scope
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : Dict= random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : Tuple= None
if self.use_input_lengths:
lowercase__ : List[Any]= (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowercase__ : Tuple= None
if self.use_token_type_ids:
lowercase__ : Any= ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
lowercase__ : str= None
lowercase__ : Tuple= None
lowercase__ : Dict= None
if self.use_labels:
lowercase__ : Optional[Any]= ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : Optional[Any]= ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__ : Tuple= ids_tensor([self.batch_size] , 2 ).float()
lowercase__ : Tuple= ids_tensor([self.batch_size] , self.num_choices )
lowercase__ : List[Any]= self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : Any= FlaubertModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : str= model(snake_case__ , lengths=snake_case__ , langs=snake_case__ )
lowercase__ : str= model(snake_case__ , langs=snake_case__ )
lowercase__ : Any= model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : str= FlaubertWithLMHeadModel(snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : Optional[Any]= model(snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : str= FlaubertForQuestionAnsweringSimple(snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : List[str]= model(snake_case__ )
lowercase__ : Dict= model(snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : List[Any]= FlaubertForQuestionAnswering(snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : Dict= model(snake_case__ )
lowercase__ : Any= model(
snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , cls_index=snake_case__ , is_impossible=snake_case__ , p_mask=snake_case__ , )
lowercase__ : List[str]= model(
snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , cls_index=snake_case__ , is_impossible=snake_case__ , )
((lowercase__), ) : Optional[Any]= result_with_labels.to_tuple()
lowercase__ : Union[str, Any]= model(snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ )
((lowercase__), ) : List[Any]= result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : List[str]= FlaubertForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : Optional[Any]= model(snake_case__ )
lowercase__ : Optional[Any]= model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : List[Any]= self.num_labels
lowercase__ : Union[str, Any]= FlaubertForTokenClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : int= model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : int= self.num_choices
lowercase__ : str= FlaubertForMultipleChoice(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : Dict= input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : int= token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : str= input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : Any= model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.prepare_config_and_inputs()
(
(
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
),
) : Any= config_and_inputs
lowercase__ : Tuple= {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"lengths": input_lengths,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
__lowerCamelCase = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
__lowerCamelCase = (
{
"feature-extraction": FlaubertModel,
"fill-mask": FlaubertWithLMHeadModel,
"question-answering": FlaubertForQuestionAnsweringSimple,
"text-classification": FlaubertForSequenceClassification,
"token-classification": FlaubertForTokenClassification,
"zero-shot": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__=False ):
'''simple docstring'''
lowercase__ : Tuple= super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
lowercase__ : List[Any]= torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case__ )
lowercase__ : List[str]= torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case__ )
return inputs_dict
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= FlaubertModelTester(self )
lowercase__ : List[str]= ConfigTester(self , config_class=snake_case__ , emb_dim=37 )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Any= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[int]= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Tuple= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[str]= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Union[str, Any]= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Tuple= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*snake_case__ )
@slow
def UpperCAmelCase_ ( self ):
'''simple docstring'''
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : List[str]= FlaubertModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
@slow
@require_torch_gpu
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__, lowercase__ : Optional[Any]= self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
lowercase__ : int= True
lowercase__ : List[Any]= model_class(config=snake_case__ )
lowercase__ : str= self._prepare_for_class(snake_case__ , snake_case__ )
lowercase__ : Dict= torch.jit.trace(
snake_case__ , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(snake_case__ , os.path.join(snake_case__ , "traced_model.pt" ) )
lowercase__ : str= torch.jit.load(os.path.join(snake_case__ , "traced_model.pt" ) , map_location=snake_case__ )
loaded(inputs_dict["input_ids"].to(snake_case__ ) , inputs_dict["attention_mask"].to(snake_case__ ) )
@require_torch
class __UpperCAmelCase( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= FlaubertModel.from_pretrained("flaubert/flaubert_base_cased" )
lowercase__ : Tuple= torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
with torch.no_grad():
lowercase__ : Optional[int]= model(snake_case__ )[0]
lowercase__ : Optional[int]= torch.Size((1, 11, 768) )
self.assertEqual(output.shape , snake_case__ )
lowercase__ : Dict= torch.tensor(
[[[-2.62_51, -1.42_98, -0.02_27], [-2.85_10, -1.63_87, 0.22_58], [-2.81_14, -1.18_32, -0.30_66]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case__ , atol=1e-4 ) )
| 85 | 1 |
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class __UpperCAmelCase:
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__=100 , snake_case__=13 , snake_case__=30 , snake_case__=2 , snake_case__=3 , snake_case__=True , snake_case__=True , snake_case__=32 , snake_case__=4 , snake_case__=4 , snake_case__=37 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=10 , snake_case__=0.02 , snake_case__=3 , snake_case__=None , snake_case__=[0, 1, 2, 3] , ):
'''simple docstring'''
lowercase__ : Any= parent
lowercase__ : Tuple= 100
lowercase__ : Optional[int]= batch_size
lowercase__ : Optional[int]= image_size
lowercase__ : List[Any]= patch_size
lowercase__ : Union[str, Any]= num_channels
lowercase__ : Tuple= is_training
lowercase__ : Union[str, Any]= use_labels
lowercase__ : Any= hidden_size
lowercase__ : Optional[int]= num_hidden_layers
lowercase__ : List[str]= num_attention_heads
lowercase__ : int= intermediate_size
lowercase__ : Optional[int]= hidden_act
lowercase__ : Dict= hidden_dropout_prob
lowercase__ : Optional[Any]= attention_probs_dropout_prob
lowercase__ : Optional[Any]= type_sequence_label_size
lowercase__ : str= initializer_range
lowercase__ : Union[str, Any]= scope
lowercase__ : Union[str, Any]= out_indices
lowercase__ : List[str]= num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
lowercase__ : Tuple= (image_size // patch_size) ** 2
lowercase__ : Any= num_patches + 1
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : int= None
lowercase__ : Union[str, Any]= None
if self.use_labels:
lowercase__ : Tuple= ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : str= ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
lowercase__ : str= self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case__ , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
lowercase__ : Optional[int]= BeitModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : List[str]= model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
lowercase__ : int= BeitForMaskedImageModeling(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : Dict= model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.type_sequence_label_size
lowercase__ : int= BeitForImageClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : List[Any]= model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowercase__ : Optional[Any]= 1
lowercase__ : int= BeitForImageClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : Dict= floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ : List[Any]= model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
lowercase__ : Dict= self.num_labels
lowercase__ : str= BeitForSemanticSegmentation(snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : List[str]= model(snake_case__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
lowercase__ : Union[str, Any]= model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Union[str, Any]= self.prepare_config_and_inputs()
lowercase__, lowercase__, lowercase__, lowercase__ : str= config_and_inputs
lowercase__ : Optional[int]= {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
__lowerCamelCase = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
__lowerCamelCase = (
{
"feature-extraction": BeitModel,
"image-classification": BeitForImageClassification,
"image-segmentation": BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__lowerCamelCase = False
__lowerCamelCase = False
__lowerCamelCase = False
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Any= BeitModelTester(self )
lowercase__ : List[str]= ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ , hidden_size=37 )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="BEiT does not use inputs_embeds" )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
pass
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__, lowercase__ : Union[str, Any]= self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Optional[int]= model_class(snake_case__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__ : str= model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case__ , nn.Linear ) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__, lowercase__ : str= self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Optional[Any]= model_class(snake_case__ )
lowercase__ : List[Any]= inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : List[str]= [*signature.parameters.keys()]
lowercase__ : Optional[int]= ["pixel_values"]
self.assertListEqual(arg_names[:1] , snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Union[str, Any]= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Any= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : str= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
if not self.model_tester.is_training:
return
lowercase__, lowercase__ : Any= self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Tuple= True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(snake_case__ ), BeitForMaskedImageModeling]:
continue
lowercase__ : str= model_class(snake_case__ )
model.to(snake_case__ )
model.train()
lowercase__ : Tuple= self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
lowercase__ : Union[str, Any]= model(**snake_case__ ).loss
loss.backward()
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__, lowercase__ : List[Any]= self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
lowercase__ : Optional[Any]= False
lowercase__ : Any= True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(snake_case__ ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
lowercase__ : int= model_class(snake_case__ )
model.gradient_checkpointing_enable()
model.to(snake_case__ )
model.train()
lowercase__ : Union[str, Any]= self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
lowercase__ : Tuple= model(**snake_case__ ).loss
loss.backward()
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__, lowercase__ : int= self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Tuple= _config_zero_init(snake_case__ )
for model_class in self.all_model_classes:
lowercase__ : List[Any]= model_class(config=snake_case__ )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def UpperCAmelCase_ ( self ):
'''simple docstring'''
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Tuple= BeitModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def lowercase__() ->Union[str, Any]:
"""simple docstring"""
lowercase__ : Dict= Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class __UpperCAmelCase( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None
@slow
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[str]= BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(snake_case__ )
lowercase__ : Optional[Any]= self.default_image_processor
lowercase__ : Dict= prepare_img()
lowercase__ : Optional[Any]= image_processor(images=snake_case__ , return_tensors="pt" ).pixel_values.to(snake_case__ )
# prepare bool_masked_pos
lowercase__ : str= torch.ones((1, 196) , dtype=torch.bool ).to(snake_case__ )
# forward pass
with torch.no_grad():
lowercase__ : int= model(pixel_values=snake_case__ , bool_masked_pos=snake_case__ )
lowercase__ : Any= outputs.logits
# verify the logits
lowercase__ : str= torch.Size((1, 196, 8192) )
self.assertEqual(logits.shape , snake_case__ )
lowercase__ : List[Any]= torch.tensor(
[[-3.24_37, 0.50_72, -13.91_74], [-3.24_56, 0.49_48, -13.94_01], [-3.20_33, 0.51_21, -13.85_50]] ).to(snake_case__ )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , snake_case__ , atol=1e-2 ) )
@slow
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[Any]= BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(snake_case__ )
lowercase__ : Tuple= self.default_image_processor
lowercase__ : int= prepare_img()
lowercase__ : Tuple= image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ )
# forward pass
with torch.no_grad():
lowercase__ : Union[str, Any]= model(**snake_case__ )
lowercase__ : Any= outputs.logits
# verify the logits
lowercase__ : Union[str, Any]= torch.Size((1, 1000) )
self.assertEqual(logits.shape , snake_case__ )
lowercase__ : int= torch.tensor([-1.23_85, -1.09_87, -1.01_08] ).to(snake_case__ )
self.assertTrue(torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 ) )
lowercase__ : str= 281
self.assertEqual(logits.argmax(-1 ).item() , snake_case__ )
@slow
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[Any]= BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ).to(
snake_case__ )
lowercase__ : List[str]= self.default_image_processor
lowercase__ : str= prepare_img()
lowercase__ : Optional[int]= image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ )
# forward pass
with torch.no_grad():
lowercase__ : int= model(**snake_case__ )
lowercase__ : Any= outputs.logits
# verify the logits
lowercase__ : List[str]= torch.Size((1, 21841) )
self.assertEqual(logits.shape , snake_case__ )
lowercase__ : Tuple= torch.tensor([1.68_81, -0.27_87, 0.59_01] ).to(snake_case__ )
self.assertTrue(torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 ) )
lowercase__ : Tuple= 2396
self.assertEqual(logits.argmax(-1 ).item() , snake_case__ )
@slow
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Any= BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
lowercase__ : Optional[int]= model.to(snake_case__ )
lowercase__ : Tuple= BeitImageProcessor(do_resize=snake_case__ , size=640 , do_center_crop=snake_case__ )
lowercase__ : str= load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
lowercase__ : List[Any]= Image.open(ds[0]["file"] )
lowercase__ : int= image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ )
# forward pass
with torch.no_grad():
lowercase__ : Tuple= model(**snake_case__ )
lowercase__ : List[str]= outputs.logits
# verify the logits
lowercase__ : Dict= torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape , snake_case__ )
lowercase__ : Tuple= version.parse(PIL.__version__ ) < version.parse("9.0.0" )
if is_pillow_less_than_a:
lowercase__ : List[Any]= torch.tensor(
[
[[-4.92_25, -2.39_54, -3.05_22], [-2.88_22, -1.00_46, -1.75_61], [-2.95_49, -1.32_28, -2.13_47]],
[[-5.81_68, -3.41_29, -4.07_78], [-3.86_51, -2.22_14, -3.02_77], [-3.83_56, -2.46_43, -3.35_35]],
[[-0.00_78, 3.99_52, 4.07_54], [2.98_56, 4.69_44, 5.00_35], [3.24_13, 4.78_13, 4.99_69]],
] , device=snake_case__ , )
else:
lowercase__ : Optional[Any]= torch.tensor(
[
[[-4.89_60, -2.36_88, -3.03_55], [-2.84_78, -0.98_36, -1.74_18], [-2.94_49, -1.33_32, -2.14_56]],
[[-5.80_81, -3.41_24, -4.10_06], [-3.85_61, -2.20_81, -3.03_23], [-3.83_65, -2.46_01, -3.36_69]],
[[-0.03_09, 3.98_68, 4.05_40], [2.96_40, 4.68_77, 4.99_76], [3.20_81, 4.76_90, 4.99_42]],
] , device=snake_case__ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , snake_case__ , atol=1e-4 ) )
@slow
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
lowercase__ : int= model.to(snake_case__ )
lowercase__ : Union[str, Any]= BeitImageProcessor(do_resize=snake_case__ , size=640 , do_center_crop=snake_case__ )
lowercase__ : int= load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
lowercase__ : str= Image.open(ds[0]["file"] )
lowercase__ : Optional[int]= image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ )
# forward pass
with torch.no_grad():
lowercase__ : Optional[Any]= model(**snake_case__ )
lowercase__ : Optional[int]= outputs.logits.detach().cpu()
lowercase__ : List[str]= image_processor.post_process_semantic_segmentation(outputs=snake_case__ , target_sizes=[(500, 300)] )
lowercase__ : List[str]= torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , snake_case__ )
lowercase__ : str= image_processor.post_process_semantic_segmentation(outputs=snake_case__ )
lowercase__ : str= torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape , snake_case__ )
| 85 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = 42
__lowerCamelCase = 42
__lowerCamelCase = None
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = 2
@register_to_config
def __init__( self , snake_case__ = 0.02 , snake_case__ = 100 , snake_case__ = 1.0_07 , snake_case__ = 80 , snake_case__ = 0.05 , snake_case__ = 50 , ):
'''simple docstring'''
# standard deviation of the initial noise distribution
lowercase__ : int= sigma_max
# setable values
lowercase__ : int= None
lowercase__ : np.IntTensor= None
lowercase__ : torch.FloatTensor= None # sigma(t_i)
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
return sample
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
lowercase__ : List[Any]= num_inference_steps
lowercase__ : Any= np.arange(0 , self.num_inference_steps )[::-1].copy()
lowercase__ : Tuple= torch.from_numpy(snake_case__ ).to(snake_case__ )
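# build the sigma schedule: a geometric interpolation between sigma_max**2 and sigma_min**2 evaluated at the reversed timesteps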
lowercase__ : Union[str, Any]= [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in self.timesteps
]
lowercase__ : int= torch.tensor(snake_case__ , dtype=torch.floataa , device=snake_case__ )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ = None ):
'''simple docstring'''
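# stochastic churn: gamma is non-zero only while sigma lies inside the [s_min, s_max] window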
if self.config.s_min <= sigma <= self.config.s_max:
lowercase__ : Optional[Any]= min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
else:
lowercase__ : str= 0
# sample eps ~ N(0, S_noise^2 * I)
lowercase__ : List[Any]= self.config.s_noise * randn_tensor(sample.shape , generator=snake_case__ ).to(sample.device )
lowercase__ : str= sigma + gamma * sigma
lowercase__ : Any= sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = True , ):
'''simple docstring'''
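# first-order (Euler) step from sigma_hat down to sigma_prev using the estimate of the denoised sample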
lowercase__ : Union[str, Any]= sample_hat + sigma_hat * model_output
lowercase__ : Optional[int]= (sample_hat - pred_original_sample) / sigma_hat
lowercase__ : Optional[Any]= sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=snake_case__ , derivative=snake_case__ , pred_original_sample=snake_case__ )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = True , ):
'''simple docstring'''
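# second-order correction: re-estimate the derivative at sigma_prev and average it with the Euler derivative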
lowercase__ : int= sample_prev + sigma_prev * model_output
lowercase__ : Optional[int]= (sample_prev - pred_original_sample) / sigma_prev
lowercase__ : Optional[Any]= sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=snake_case__ , derivative=snake_case__ , pred_original_sample=snake_case__ )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
raise NotImplementedError()
| 85 | 1 |
"""simple docstring"""
def lowercase__(A ) ->list:
"""simple docstring"""
if n_term == "":
return []
lowercase__ : list= []
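# term k of the series is the string "1/k"; the very first term is written simply as "1"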
for temp in range(int(A ) ):
series.append(f'''1/{temp + 1}''' if series else "1" )
return series
if __name__ == "__main__":
a : Dict = input("""Enter the last number (nth term) of the Harmonic Series""")
print("""Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n""")
print(harmonic_series(nth_term))
| 85 |
"""simple docstring"""
from ....utils import logging
a : List[str] = logging.get_logger(__name__)
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__=None , snake_case__=2048 ):
'''simple docstring'''
lowercase__ : Dict= config.__dict__
lowercase__ : str= modal_hidden_size
if num_labels:
lowercase__ : List[str]= num_labels
| 85 | 1 |
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def lowercase__(A ) ->List[str]:
"""simple docstring"""
if "cls_token" in name:
lowercase__ : List[str]= name.replace("cls_token" , "vit.embeddings.cls_token" )
if "mask_token" in name:
lowercase__ : Dict= name.replace("mask_token" , "decoder.mask_token" )
if "decoder_pos_embed" in name:
lowercase__ : int= name.replace("decoder_pos_embed" , "decoder.decoder_pos_embed" )
if "pos_embed" in name and "decoder" not in name:
lowercase__ : str= name.replace("pos_embed" , "vit.embeddings.position_embeddings" )
if "patch_embed.proj" in name:
lowercase__ : str= name.replace("patch_embed.proj" , "vit.embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
lowercase__ : str= name.replace("patch_embed.norm" , "vit.embeddings.norm" )
if "decoder_blocks" in name:
lowercase__ : Tuple= name.replace("decoder_blocks" , "decoder.decoder_layers" )
if "blocks" in name:
lowercase__ : List[str]= name.replace("blocks" , "vit.encoder.layer" )
if "attn.proj" in name:
lowercase__ : Tuple= name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
lowercase__ : Any= name.replace("attn" , "attention.self" )
if "norm1" in name:
lowercase__ : List[str]= name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
lowercase__ : Any= name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
lowercase__ : Dict= name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
lowercase__ : int= name.replace("mlp.fc2" , "output.dense" )
if "decoder_embed" in name:
lowercase__ : Union[str, Any]= name.replace("decoder_embed" , "decoder.decoder_embed" )
if "decoder_norm" in name:
lowercase__ : Any= name.replace("decoder_norm" , "decoder.decoder_norm" )
if "decoder_pred" in name:
lowercase__ : Optional[int]= name.replace("decoder_pred" , "decoder.decoder_pred" )
if "norm.weight" in name and "decoder" not in name:
lowercase__ : Any= name.replace("norm.weight" , "vit.layernorm.weight" )
if "norm.bias" in name and "decoder" not in name:
lowercase__ : Optional[Any]= name.replace("norm.bias" , "vit.layernorm.bias" )
return name
def lowercase__(A , A ) ->List[Any]:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
lowercase__ : List[str]= orig_state_dict.pop(A )
if "qkv" in key:
lowercase__ : Tuple= key.split("." )
lowercase__ : Any= int(key_split[1] )
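# split the fused qkv projection of this layer into separate query, key and value tensors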
if "decoder_blocks" in key:
lowercase__ : int= config.decoder_hidden_size
lowercase__ : Optional[int]= "decoder.decoder_layers."
if "weight" in key:
lowercase__ : List[str]= val[:dim, :]
lowercase__ : List[str]= val[dim : dim * 2, :]
lowercase__ : int= val[-dim:, :]
elif "bias" in key:
lowercase__ : Optional[Any]= val[:dim]
lowercase__ : Optional[int]= val[dim : dim * 2]
lowercase__ : Optional[int]= val[-dim:]
else:
lowercase__ : List[Any]= config.hidden_size
lowercase__ : str= "vit.encoder.layer."
if "weight" in key:
lowercase__ : Dict= val[:dim, :]
lowercase__ : Any= val[dim : dim * 2, :]
lowercase__ : List[str]= val[-dim:, :]
elif "bias" in key:
lowercase__ : Optional[Any]= val[:dim]
lowercase__ : List[Any]= val[dim : dim * 2]
lowercase__ : Optional[int]= val[-dim:]
else:
lowercase__ : Tuple= val
return orig_state_dict
def lowercase__(A , A ) ->int:
"""simple docstring"""
lowercase__ : List[str]= ViTMAEConfig()
if "large" in checkpoint_url:
lowercase__ : Tuple= 1_024
lowercase__ : int= 4_096
lowercase__ : List[Any]= 24
lowercase__ : Tuple= 16
elif "huge" in checkpoint_url:
lowercase__ : Union[str, Any]= 14
lowercase__ : List[Any]= 1_280
lowercase__ : int= 5_120
lowercase__ : List[str]= 32
lowercase__ : Any= 16
lowercase__ : Optional[Any]= ViTMAEForPreTraining(A )
lowercase__ : List[str]= torch.hub.load_state_dict_from_url(A , map_location="cpu" )["model"]
lowercase__ : List[Any]= ViTMAEImageProcessor(size=config.image_size )
lowercase__ : Optional[Any]= convert_state_dict(A , A )
model.load_state_dict(A )
model.eval()
lowercase__ : Dict= "https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"
lowercase__ : Optional[Any]= Image.open(requests.get(A , stream=A ).raw )
lowercase__ : Union[str, Any]= ViTMAEImageProcessor(size=config.image_size )
lowercase__ : Optional[Any]= image_processor(images=A , return_tensors="pt" )
# forward pass
torch.manual_seed(2 )
lowercase__ : str= model(**A )
lowercase__ : Dict= outputs.logits
if "large" in checkpoint_url:
lowercase__ : str= torch.tensor(
[[-0.7_309, -0.7_128, -1.0_169], [-1.0_161, -0.9_058, -1.1_878], [-1.0_478, -0.9_411, -1.1_911]] )
elif "huge" in checkpoint_url:
lowercase__ : Tuple= torch.tensor(
[[-1.1_599, -0.9_199, -1.2_221], [-1.1_952, -0.9_269, -1.2_307], [-1.2_143, -0.9_337, -1.2_262]] )
else:
lowercase__ : Union[str, Any]= torch.tensor(
[[-0.9_192, -0.8_481, -1.1_259], [-1.1_349, -1.0_034, -1.2_599], [-1.1_757, -1.0_429, -1.2_726]] )
# verify logits
assert torch.allclose(logits[0, :3, :3] , A , atol=1e-4 )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(A )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(A )
if __name__ == "__main__":
a : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth""",
type=str,
help="""URL of the checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
a : List[str] = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 85 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def lowercase__(A ) ->int:
"""simple docstring"""
lowercase__ : Optional[int]= []
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''',
f'''stage{idx}.patch_embed.proj.weight''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''',
f'''stage{idx}.patch_embed.proj.bias''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''',
f'''stage{idx}.patch_embed.norm.weight''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''',
f'''stage{idx}.patch_embed.norm.bias''',
) )
return embed
def lowercase__(A , A ) ->Any:
"""simple docstring"""
lowercase__ : Any= []
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj.bias''',
) )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', f'''stage{idx}.blocks.{cnt}.norm1.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', f'''stage{idx}.blocks.{cnt}.norm1.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', f'''stage{idx}.blocks.{cnt}.norm2.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', f'''stage{idx}.blocks.{cnt}.norm2.bias''') )
return attention_weights
def lowercase__(A ) ->List[Any]:
"""simple docstring"""
lowercase__ : Dict= []
token.append((f'''cvt.encoder.stages.{idx}.cls_token''', "stage2.cls_token") )
return token
def lowercase__() ->Union[str, Any]:
"""simple docstring"""
lowercase__ : Dict= []
head.append(("layernorm.weight", "norm.weight") )
head.append(("layernorm.bias", "norm.bias") )
head.append(("classifier.weight", "head.weight") )
head.append(("classifier.bias", "head.bias") )
return head
def lowercase__(A , A , A , A ) ->Optional[int]:
"""simple docstring"""
lowercase__ : List[str]= "imagenet-1k-id2label.json"
lowercase__ : List[str]= 1_000
lowercase__ : Tuple= "huggingface/label-files"
lowercase__ : int= num_labels
lowercase__ : int= json.load(open(cached_download(hf_hub_url(A , A , repo_type="dataset" ) ) , "r" ) )
lowercase__ : str= {int(A ): v for k, v in idalabel.items()}
lowercase__ : Optional[int]= idalabel
lowercase__ : Union[str, Any]= {v: k for k, v in idalabel.items()}
lowercase__ : Tuple= CvtConfig(num_labels=A , idalabel=A , labelaid=A )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit("/" , 1 )[-1][4:6] == "13":
lowercase__ : int= [1, 2, 10]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit("/" , 1 )[-1][4:6] == "21":
lowercase__ : Union[str, Any]= [1, 4, 16]
# For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
else:
lowercase__ : Optional[Any]= [2, 2, 20]
lowercase__ : Optional[Any]= [3, 12, 16]
lowercase__ : List[str]= [192, 768, 1_024]
lowercase__ : List[str]= CvtForImageClassification(A )
lowercase__ : Any= AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" )
lowercase__ : Dict= image_size
lowercase__ : int= torch.load(A , map_location=torch.device("cpu" ) )
lowercase__ : Optional[Any]= OrderedDict()
lowercase__ : Tuple= []
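# collect (new_name, original_name) pairs stage by stage, then copy the original weights under the new names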
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
lowercase__ : Optional[int]= list_of_state_dict + cls_token(A )
lowercase__ : List[str]= list_of_state_dict + embeddings(A )
for cnt in range(config.depth[idx] ):
lowercase__ : Dict= list_of_state_dict + attention(A , A )
lowercase__ : Optional[Any]= list_of_state_dict + final()
for gg in list_of_state_dict:
print(A )
for i in range(len(A ) ):
lowercase__ : str= original_weights[list_of_state_dict[i][1]]
model.load_state_dict(A )
model.save_pretrained(A )
image_processor.save_pretrained(A )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
a : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"""--cvt_model""",
default="""cvt-w24""",
type=str,
help="""Name of the cvt model you'd like to convert.""",
)
parser.add_argument(
"""--image_size""",
default=384,
type=int,
help="""Input Image Size""",
)
parser.add_argument(
"""--cvt_file_name""",
default=r"""cvtmodels\CvT-w24-384x384-IN-22k.pth""",
type=str,
help="""Input Image Size""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
a : Optional[int] = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 85 | 1 |
"""simple docstring"""
from __future__ import annotations
a : List[str] = list[list[int]]
# assigning initial values to the grid
a : Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
a : Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def lowercase__(A , A , A , A ) ->bool:
"""simple docstring"""
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
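# check the 3x3 sub-grid containing (row, column)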
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def lowercase__(A ) ->tuple[int, int] | None:
"""simple docstring"""
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def lowercase__(A ) ->Matrix | None:
"""simple docstring"""
if location := find_empty_location(A ):
lowercase__, lowercase__ : str= location
else:
# If the location is ``None``, then the grid is solved.
return grid
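# try each digit 1-9 in the empty cell; undo the placement if it leads to a dead end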
for digit in range(1 , 10 ):
if is_safe(A , A , A , A ):
lowercase__ : Any= digit
if sudoku(A ) is not None:
return grid
lowercase__ : Optional[Any]= 0
return None
def lowercase__(A ) ->None:
"""simple docstring"""
for row in grid:
for cell in row:
print(A , end=" " )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("""\nExample grid:\n""" + """=""" * 20)
print_solution(example_grid)
print("""\nExample grid solution:""")
a : str = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("""Cannot find a solution.""")
| 85 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = 42
__lowerCamelCase = 42
def __init__( self , snake_case__ , snake_case__ ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=snake_case__ , scheduler=snake_case__ )
@torch.no_grad()
def __call__( self , snake_case__ = 1 , snake_case__ = 2000 , snake_case__ = None , snake_case__ = "pil" , snake_case__ = True , **snake_case__ , ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.unet.config.sample_size
lowercase__ : Dict= (batch_size, 3, img_size, img_size)
lowercase__ : List[Any]= self.unet
lowercase__ : Tuple= randn_tensor(snake_case__ , generator=snake_case__ ) * self.scheduler.init_noise_sigma
lowercase__ : Tuple= sample.to(self.device )
self.scheduler.set_timesteps(snake_case__ )
self.scheduler.set_sigmas(snake_case__ )
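# predictor-corrector sampling: several corrector steps followed by one predictor step per timestep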
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
lowercase__ : Optional[Any]= self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
lowercase__ : List[Any]= self.unet(snake_case__ , snake_case__ ).sample
lowercase__ : List[Any]= self.scheduler.step_correct(snake_case__ , snake_case__ , generator=snake_case__ ).prev_sample
# prediction step
lowercase__ : List[str]= model(snake_case__ , snake_case__ ).sample
lowercase__ : Tuple= self.scheduler.step_pred(snake_case__ , snake_case__ , snake_case__ , generator=snake_case__ )
lowercase__, lowercase__ : Tuple= output.prev_sample, output.prev_sample_mean
lowercase__ : List[str]= sample_mean.clamp(0 , 1 )
lowercase__ : Union[str, Any]= sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowercase__ : str= self.numpy_to_pil(snake_case__ )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=snake_case__ )
| 85 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
__lowerCamelCase = StableDiffusionInstructPixaPixPipeline
__lowerCamelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
__lowerCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
__lowerCamelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS
__lowerCamelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS
def UpperCAmelCase_ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase__ : Optional[int]= UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
lowercase__ : Optional[Any]= PNDMScheduler(skip_prk_steps=snake_case__ )
torch.manual_seed(0 )
lowercase__ : List[Any]= AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
lowercase__ : Optional[int]= CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowercase__ : Optional[int]= CLIPTextModel(snake_case__ )
lowercase__ : List[Any]= CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
lowercase__ : List[Any]= {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def UpperCAmelCase_ ( self , snake_case__ , snake_case__=0 ):
'''simple docstring'''
lowercase__ : List[str]= floats_tensor((1, 3, 32, 32) , rng=random.Random(snake_case__ ) ).to(snake_case__ )
lowercase__ : Optional[int]= image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase__ : Any= Image.fromarray(np.uinta(snake_case__ ) ).convert("RGB" )
if str(snake_case__ ).startswith("mps" ):
lowercase__ : str= torch.manual_seed(snake_case__ )
else:
lowercase__ : Any= torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
lowercase__ : List[str]= {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"image_guidance_scale": 1,
"output_type": "numpy",
}
return inputs
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase__ : int= self.get_dummy_components()
lowercase__ : int= StableDiffusionInstructPixaPixPipeline(**snake_case__ )
lowercase__ : int= sd_pipe.to(snake_case__ )
sd_pipe.set_progress_bar_config(disable=snake_case__ )
lowercase__ : Any= self.get_dummy_inputs(snake_case__ )
lowercase__ : int= sd_pipe(**snake_case__ ).images
lowercase__ : int= image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase__ : int= np.array([0.75_26, 0.37_50, 0.45_47, 0.61_17, 0.58_66, 0.50_16, 0.43_27, 0.56_42, 0.48_15] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Union[str, Any]= "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase__ : List[str]= self.get_dummy_components()
lowercase__ : int= StableDiffusionInstructPixaPixPipeline(**snake_case__ )
lowercase__ : Union[str, Any]= sd_pipe.to(snake_case__ )
sd_pipe.set_progress_bar_config(disable=snake_case__ )
lowercase__ : List[Any]= self.get_dummy_inputs(snake_case__ )
lowercase__ : Tuple= "french fries"
lowercase__ : Dict= sd_pipe(**snake_case__ , negative_prompt=snake_case__ )
lowercase__ : Tuple= output.images
lowercase__ : Optional[Any]= image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase__ : str= np.array([0.75_11, 0.36_42, 0.45_53, 0.62_36, 0.57_97, 0.50_13, 0.43_43, 0.56_11, 0.48_31] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : str= "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase__ : Dict= self.get_dummy_components()
lowercase__ : Optional[int]= StableDiffusionInstructPixaPixPipeline(**snake_case__ )
lowercase__ : Tuple= sd_pipe.to(snake_case__ )
sd_pipe.set_progress_bar_config(disable=snake_case__ )
lowercase__ : Dict= self.get_dummy_inputs(snake_case__ )
lowercase__ : List[Any]= [inputs["prompt"]] * 2
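# turn the dummy PIL image into a float tensor and duplicate it so prompts and images form a batch of two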
lowercase__ : Optional[int]= np.array(inputs["image"] ).astype(np.floataa ) / 2_55.0
lowercase__ : int= torch.from_numpy(snake_case__ ).unsqueeze(0 ).to(snake_case__ )
lowercase__ : Union[str, Any]= image / 2 + 0.5
lowercase__ : List[str]= image.permute(0 , 3 , 1 , 2 )
lowercase__ : Dict= image.repeat(2 , 1 , 1 , 1 )
lowercase__ : List[Any]= sd_pipe(**snake_case__ ).images
lowercase__ : List[str]= image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
lowercase__ : Optional[int]= np.array([0.58_12, 0.57_48, 0.52_22, 0.59_08, 0.56_95, 0.71_74, 0.68_04, 0.55_23, 0.55_79] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Any= "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase__ : str= self.get_dummy_components()
lowercase__ : List[str]= EulerAncestralDiscreteScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" )
lowercase__ : str= StableDiffusionInstructPixaPixPipeline(**snake_case__ )
lowercase__ : List[Any]= sd_pipe.to(snake_case__ )
sd_pipe.set_progress_bar_config(disable=snake_case__ )
lowercase__ : Optional[int]= self.get_dummy_inputs(snake_case__ )
lowercase__ : Any= sd_pipe(**snake_case__ ).images
lowercase__ : Dict= image[0, -3:, -3:, -1]
lowercase__ : List[Any]= [round(snake_case__ , 4 ) for x in image_slice.flatten().tolist()]
print(",".join([str(snake_case__ ) for x in slice] ) )
assert image.shape == (1, 32, 32, 3)
lowercase__ : Optional[int]= np.array([0.74_17, 0.38_42, 0.47_32, 0.57_76, 0.58_91, 0.51_39, 0.40_52, 0.56_73, 0.49_86] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def UpperCAmelCase_ ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[str]= self.get_dummy_components()
lowercase__ : Any= StableDiffusionInstructPixaPixPipeline(**snake_case__ )
lowercase__ : List[Any]= VaeImageProcessor(do_resize=snake_case__ , do_normalize=snake_case__ )
lowercase__ : List[str]= pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
lowercase__ : Dict= pipe(**self.get_dummy_inputs_by_type(snake_case__ , input_image_type="pt" ) )[0]
lowercase__ : Union[str, Any]= components["vae"]
lowercase__ : List[str]= self.get_dummy_inputs_by_type(snake_case__ , input_image_type="pt" )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
lowercase__ : Optional[Any]= vae.encode(inputs[image_param] ).latent_dist.mode()
lowercase__ : str= pipe(**snake_case__ )[0]
lowercase__ : int= np.abs(out - out_latents_inputs ).max()
self.assertLess(snake_case__ , 1e-4 , "passing latents as image input generate different result from passing image" )
@slow
@require_torch_gpu
class __UpperCAmelCase( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self , snake_case__=0 ):
'''simple docstring'''
lowercase__ : Any= torch.manual_seed(snake_case__ )
lowercase__ : str= load_image(
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg" )
lowercase__ : str= {
"prompt": "turn him into a cyborg",
"image": image,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"image_guidance_scale": 1.0,
"output_type": "numpy",
}
return inputs
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[Any]= StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=snake_case__ )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
pipe.enable_attention_slicing()
lowercase__ : List[Any]= self.get_inputs()
lowercase__ : Tuple= pipe(**snake_case__ ).images
lowercase__ : List[Any]= image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowercase__ : Union[str, Any]= np.array([0.59_02, 0.60_15, 0.60_27, 0.59_83, 0.60_92, 0.60_61, 0.57_65, 0.57_85, 0.55_55] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Tuple= StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=snake_case__ )
lowercase__ : int= LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
pipe.enable_attention_slicing()
lowercase__ : Dict= self.get_inputs()
lowercase__ : Any= pipe(**snake_case__ ).images
lowercase__ : Dict= image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowercase__ : Union[str, Any]= np.array([0.65_78, 0.68_17, 0.69_72, 0.67_61, 0.68_56, 0.69_16, 0.64_28, 0.65_16, 0.63_01] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : int= StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=snake_case__ )
lowercase__ : Tuple= DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
pipe.enable_attention_slicing()
lowercase__ : int= self.get_inputs()
lowercase__ : Tuple= pipe(**snake_case__ ).images
lowercase__ : int= image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowercase__ : str= np.array([0.38_28, 0.38_34, 0.38_18, 0.37_92, 0.38_65, 0.37_52, 0.37_92, 0.38_47, 0.37_53] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Union[str, Any]= 0
def callback_fn(snake_case__ , snake_case__ , snake_case__ ) -> None:
lowercase__ : Union[str, Any]= True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
lowercase__ : Optional[Any]= latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
lowercase__ : Tuple= latents[0, -3:, -3:, -1]
lowercase__ : Union[str, Any]= np.array([-0.24_63, -0.46_44, -0.97_56, 1.51_76, 1.44_14, 0.78_66, 0.98_97, 0.85_21, 0.79_83] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
lowercase__ : Tuple= latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
lowercase__ : str= latents[0, -3:, -3:, -1]
lowercase__ : Dict= np.array([-0.26_44, -0.46_26, -0.96_53, 1.51_76, 1.45_51, 0.76_86, 0.98_05, 0.84_52, 0.81_15] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
lowercase__ : Dict= False
lowercase__ : Any= StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=snake_case__ , torch_dtype=torch.floataa )
lowercase__ : Dict= pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
pipe.enable_attention_slicing()
lowercase__ : List[str]= self.get_inputs()
pipe(**snake_case__ , callback=snake_case__ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def UpperCAmelCase_ ( self ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase__ : Dict= StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=snake_case__ , torch_dtype=torch.floataa )
lowercase__ : Any= pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowercase__ : List[str]= self.get_inputs()
lowercase__ : str= pipe(**snake_case__ )
lowercase__ : Dict= torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[Any]= self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
lowercase__ : List[str]= inputs["image"].resize((504, 504) )
lowercase__ : Tuple= "timbrooks/instruct-pix2pix"
lowercase__ : Optional[Any]= StableDiffusionInstructPixaPixPipeline.from_pretrained(
snake_case__ , safety_checker=snake_case__ , )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
pipe.enable_attention_slicing()
lowercase__ : Tuple= pipe(**snake_case__ )
lowercase__ : Optional[int]= output.images[0]
lowercase__ : Dict= image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
lowercase__ : int= np.array([0.27_26, 0.25_29, 0.26_64, 0.26_55, 0.26_41, 0.26_42, 0.25_91, 0.26_49, 0.25_90] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
| 85 |
"""simple docstring"""
def lowercase__(A ) ->list[int]:
"""simple docstring"""
lowercase__ : List[str]= len(A )
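# compare each element with every later element and swap whenever the pair is out of order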
for i in range(A ):
for j in range(i + 1 , A ):
if numbers[j] < numbers[i]:
lowercase__, lowercase__ : List[str]= numbers[j], numbers[i]
return numbers
if __name__ == "__main__":
a : Dict = input("""Enter numbers separated by a comma:\n""").strip()
a : List[str] = [int(item) for item in user_input.split(""",""")]
print(exchange_sort(unsorted))
| 85 | 1 |
"""simple docstring"""
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
a : List[Any] = 4
a : Union[str, Any] = 3
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
pass
def lowercase__(A ) ->Optional[int]:
"""simple docstring"""
for shard in shards:
for i in range(A ):
yield {"i": i, "shard": shard}
def lowercase__() ->Any:
"""simple docstring"""
lowercase__ : Optional[int]= int(os.environ["RANK"] )
lowercase__ : Dict= int(os.environ["WORLD_SIZE"] )
lowercase__ : Optional[int]= ArgumentParser()
parser.add_argument("--streaming" , type=A )
parser.add_argument("--local_rank" , type=A )
parser.add_argument("--num_workers" , type=A , default=0 )
lowercase__ : List[str]= parser.parse_args()
lowercase__ : List[str]= args.streaming
lowercase__ : Dict= args.num_workers
lowercase__ : Dict= {"shards": [f'''shard_{shard_idx}''' for shard_idx in range(A )]}
lowercase__ : str= IterableDataset.from_generator(A , gen_kwargs=A )
if not streaming:
lowercase__ : List[str]= Dataset.from_list(list(A ) )
lowercase__ : Optional[int]= split_dataset_by_node(A , rank=A , world_size=A )
lowercase__ : Any= torch.utils.data.DataLoader(A , num_workers=A )
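# the test expects each rank to see full_size // world_size examples, plus one extra for the first (full_size % world_size) ranks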
lowercase__ : Union[str, Any]= NUM_SHARDS * NUM_ITEMS_PER_SHARD
lowercase__ : str= full_size // world_size
expected_local_size += int(rank < (full_size % world_size) )
lowercase__ : List[Any]= sum(1 for _ in dataloader )
if local_size != expected_local_size:
raise FailedTestError(f'''local_size {local_size} != expected_local_size {expected_local_size}''' )
if __name__ == "__main__":
main()
| 85 |
"""simple docstring"""
import math
from collections.abc import Iterator
from itertools import takewhile
def lowercase__(A ) ->bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All prime numbers greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(A ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def lowercase__() ->Iterator[int]:
"""simple docstring"""
lowercase__ : Union[str, Any]= 2
while True:
if is_prime(A ):
yield num
num += 1
def lowercase__(A = 2_000_000 ) ->int:
"""simple docstring"""
return sum(takewhile(lambda x : x < A , prime_generator() ) )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 85 | 1 |
"""simple docstring"""
def lowercase__(A , A ) ->Dict:
"""simple docstring"""
_enforce_args(A , A )
if n == 0:
return 0
lowercase__ : List[Any]= float("-inf" )
for i in range(1 , n + 1 ):
lowercase__ : List[str]= max(
A , prices[i - 1] + naive_cut_rod_recursive(n - i , A ) )
return max_revue
def lowercase__(A , A ) ->List[str]:
"""simple docstring"""
_enforce_args(A , A )
lowercase__ : Dict= [float("-inf" ) for _ in range(n + 1 )]
return _top_down_cut_rod_recursive(A , A , A )
def lowercase__(A , A , A ) ->int:
"""simple docstring"""
if max_rev[n] >= 0:
return max_rev[n]
elif n == 0:
return 0
else:
lowercase__ : int= float("-inf" )
for i in range(1 , n + 1 ):
lowercase__ : str= max(
A , prices[i - 1] + _top_down_cut_rod_recursive(n - i , A , A ) , )
lowercase__ : int= max_revenue
return max_rev[n]
def lowercase__(A , A ) ->Any:
"""simple docstring"""
_enforce_args(A , A )
# length(max_rev) = n + 1, to accommodate the revenue obtainable from a rod of
# length 0.
lowercase__ : Tuple= [float("-inf" ) for _ in range(n + 1 )]
lowercase__ : int= 0
for i in range(1 , n + 1 ):
lowercase__ : Optional[Any]= max_rev[i]
for j in range(1 , i + 1 ):
lowercase__ : int= max(A , prices[j - 1] + max_rev[i - j] )
lowercase__ : int= max_revenue_i
return max_rev[n]
def lowercase__(A , A ) ->Optional[Any]:
"""simple docstring"""
if n < 0:
lowercase__ : int= f'''n must be greater than or equal to 0. Got n = {n}'''
raise ValueError(A )
if n > len(A ):
lowercase__ : Any= (
"Each integral piece of rod must have a corresponding price. "
f'''Got n = {n} but length of prices = {len(A )}'''
)
raise ValueError(A )
def lowercase__() ->Optional[Any]:
"""simple docstring"""
lowercase__ : List[str]= [6, 10, 12, 15, 20, 23]
lowercase__ : Tuple= len(A )
# the best revenue comes from cutting the rod into 6 pieces, each
# of length 1 resulting in a revenue of 6 * 6 = 36.
lowercase__ : Union[str, Any]= 36
lowercase__ : Tuple= top_down_cut_rod(A , A )
lowercase__ : List[str]= bottom_up_cut_rod(A , A )
lowercase__ : List[Any]= naive_cut_rod_recursive(A , A )
assert expected_max_revenue == max_rev_top_down
assert max_rev_top_down == max_rev_bottom_up
assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
| 85 |
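A compact, runnable restatement of the bottom-up rod-cutting recurrence used above; the expected value of 36 for prices [6, 10, 12, 15, 20, 23] matches the record's own check (function and variable names here are illustrative).

def bottom_up_cut_rod(n: int, prices: list[int]) -> int:
    # max_rev[i] holds the best revenue obtainable from a rod of length i.
    max_rev = [float("-inf")] * (n + 1)
    max_rev[0] = 0
    for i in range(1, n + 1):
        best = max_rev[i]
        for j in range(1, i + 1):
            # Either keep the previous best or cut off a first piece of length j.
            best = max(best, prices[j - 1] + max_rev[i - j])
        max_rev[i] = best
    return max_rev[n]

assert bottom_up_cut_rod(6, [6, 10, 12, 15, 20, 23]) == 36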
"""simple docstring"""
def lowercase__(A ) ->bool:
"""simple docstring"""
lowercase__ : Tuple= (1 + 24 * n) ** 0.5
return ((1 + root) / 6) % 1 == 0
def lowercase__(A = 5_000 ) ->int:
"""simple docstring"""
lowercase__ : str= [(i * (3 * i - 1)) // 2 for i in range(1 , A )]
for i, pentagonal_i in enumerate(A ):
for j in range(A , len(A ) ):
lowercase__ : List[Any]= pentagonal_nums[j]
lowercase__ : int= pentagonal_i + pentagonal_j
lowercase__ : Optional[int]= pentagonal_j - pentagonal_i
if is_pentagonal(A ) and is_pentagonal(A ):
return b
return -1
if __name__ == "__main__":
print(F"""{solution() = }""")
| 85 | 1 |
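The test above inverts the pentagonal-number formula P(k) = k(3k - 1)/2: n is pentagonal exactly when (1 + sqrt(1 + 24n)) / 6 is a whole number. A small self-contained sketch with illustrative names:

def is_pentagonal(n: int) -> bool:
    # Solve k(3k - 1)/2 = n for k and require k to be a positive integer.
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0

# The first few pentagonal numbers are 1, 5, 12, 22, 35.
assert all(is_pentagonal(p) for p in (1, 5, 12, 22, 35))
assert not is_pentagonal(6)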
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
a : int = logging.get_logger(__name__)
a : List[str] = {
"""EleutherAI/gpt-neo-1.3B""": """https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json""",
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = "gpt_neo"
__lowerCamelCase = ["past_key_values"]
__lowerCamelCase = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
def __init__( self , snake_case__=50257 , snake_case__=2048 , snake_case__=2048 , snake_case__=24 , snake_case__=[[["global", "local"], 12]] , snake_case__=16 , snake_case__=None , snake_case__=256 , snake_case__="gelu_new" , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.1 , snake_case__=1e-5 , snake_case__=0.02 , snake_case__=True , snake_case__=50256 , snake_case__=50256 , **snake_case__ , ):
'''simple docstring'''
lowercase__ : Tuple= vocab_size
lowercase__ : List[Any]= max_position_embeddings
lowercase__ : Any= hidden_size
lowercase__ : Any= num_layers
lowercase__ : str= num_heads
lowercase__ : str= intermediate_size
lowercase__ : List[str]= window_size
lowercase__ : str= activation_function
lowercase__ : Optional[Any]= resid_dropout
lowercase__ : Dict= embed_dropout
lowercase__ : Optional[Any]= attention_dropout
lowercase__ : Any= classifier_dropout
lowercase__ : List[str]= layer_norm_epsilon
lowercase__ : Optional[Any]= initializer_range
lowercase__ : int= use_cache
lowercase__ : Union[str, Any]= bos_token_id
lowercase__ : Union[str, Any]= eos_token_id
lowercase__ : Optional[int]= attention_types
lowercase__ : Optional[Any]= self.expand_attention_types_params(snake_case__ )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
"Configuration for convolutional module is incorrect. "
"It is required that `len(config.attention_layers)` == `config.num_layers` "
F'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, '''
F'''`config.num_layers = {self.num_layers}`. '''
"`config.attention_layers` is prepared using `config.attention_types`. "
"Please verify the value of `config.attention_types` argument." )
super().__init__(bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
@staticmethod
def UpperCAmelCase_ ( snake_case__ ):
'''simple docstring'''
lowercase__ : List[Any]= []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
def lowercase__(A , A , A , A ) ->str:
"""simple docstring"""
import torch
lowercase__ : List[str]= input.size()
lowercase__ : Dict= len(A )
lowercase__ : List[str]= shape[dimension]
lowercase__ : List[str]= torch.arange(0 , A , A )
lowercase__ : List[Any]= torch.div(sizedim - size , A , rounding_mode="floor" ) + 1
lowercase__ : Tuple= torch.arange(A ) + low_indices[:min_length][:, None]
lowercase__ : Dict= [slice(A )] * rank
lowercase__ : List[Any]= indices
lowercase__ : List[str]= input[s]
lowercase__ : str= list(range(0 , rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(A )
def lowercase__(A , A ) ->str:
"""simple docstring"""
import torch
lowercase__ : Tuple= torch.arange(1 , A )
lowercase__ : Optional[int]= torch.remainder(A , A )
lowercase__ : Optional[int]= remainders == 0
lowercase__ : Optional[int]= candidates[divisor_indices]
lowercase__ : int= torch.max(A )
return largest_divisor, torch.div(A , A , rounding_mode="floor" )
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
@property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Any= OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
if self.use_past:
self.fill_with_past_key_values_(snake_case__ , direction="inputs" )
lowercase__ : int= {0: "batch", 1: "past_sequence + sequence"}
else:
lowercase__ : Union[str, Any]= {0: "batch", 1: "sequence"}
return common_inputs
@property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return self._config.num_heads
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ = -1 , snake_case__ = -1 , snake_case__ = False , snake_case__ = None , ):
'''simple docstring'''
lowercase__ : int= super(snake_case__ , self ).generate_dummy_inputs(
snake_case__ , batch_size=snake_case__ , seq_length=snake_case__ , is_pair=snake_case__ , framework=snake_case__ )
# We need to order the inputs in the way they appear in the forward()
lowercase__ : Optional[Any]= OrderedDict({"input_ids": common_inputs["input_ids"]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
lowercase__, lowercase__ : Optional[Any]= common_inputs["input_ids"].shape
# Not using the same length for past_key_values
lowercase__ : str= seqlen + 2
lowercase__ : str= (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
lowercase__ : Optional[int]= [
(torch.zeros(snake_case__ ), torch.zeros(snake_case__ )) for _ in range(self.num_layers )
]
lowercase__ : int= common_inputs["attention_mask"]
if self.use_past:
lowercase__ : Optional[Any]= ordered_inputs["attention_mask"].dtype
lowercase__ : Optional[Any]= torch.cat(
[ordered_inputs["attention_mask"], torch.ones(snake_case__ , snake_case__ , dtype=snake_case__ )] , dim=1 )
return ordered_inputs
@property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return 13
| 85 |
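A standalone sketch of how the attention-type expansion in the configuration above behaves: each [pattern, count] pair is repeated count times and flattened, so the default [[["global", "local"], 12]] produces 24 alternating layers (the helper name below is illustrative).

def expand_attention_types(attention_types):
    # e.g. [[["global", "local"], 12]] -> ["global", "local", "global", "local", ...] (24 entries)
    layers = []
    for pattern, count in attention_types:
        for _ in range(count):
            layers.extend(pattern)
    return layers

attention_layers = expand_attention_types([[["global", "local"], 12]])
assert len(attention_layers) == 24
assert attention_layers[:4] == ["global", "local", "global", "local"]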
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : List[str] = logging.get_logger(__name__)
a : Union[str, Any] = {
"""google/pix2struct-textcaps-base""": (
"""https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"""
),
}
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = "pix2struct_text_model"
__lowerCamelCase = ["past_key_values"]
__lowerCamelCase = {
"hidden_size": "hidden_size",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , snake_case__=50244 , snake_case__=768 , snake_case__=64 , snake_case__=2048 , snake_case__=12 , snake_case__=12 , snake_case__=32 , snake_case__=128 , snake_case__=0.1 , snake_case__=1e-6 , snake_case__=1.0 , snake_case__="gelu_new" , snake_case__=0 , snake_case__=False , snake_case__=0 , snake_case__=1 , snake_case__=False , snake_case__=True , **snake_case__ , ):
'''simple docstring'''
lowercase__ : int= vocab_size
lowercase__ : Optional[Any]= hidden_size
lowercase__ : Tuple= d_kv
lowercase__ : Optional[int]= d_ff
lowercase__ : Any= num_layers
lowercase__ : Dict= num_heads
lowercase__ : List[Any]= relative_attention_num_buckets
lowercase__ : Optional[Any]= relative_attention_max_distance
lowercase__ : Dict= dropout_rate
lowercase__ : Tuple= layer_norm_epsilon
lowercase__ : str= initializer_factor
lowercase__ : Any= use_cache
lowercase__ : Optional[int]= eos_token_id
lowercase__ : str= decoder_start_token_id
# for backwards compatibility
lowercase__ : Optional[Any]= dense_act_fn
super().__init__(
pad_token_id=snake_case__ , eos_token_id=snake_case__ , decoder_start_token_id=snake_case__ , tie_word_embeddings=snake_case__ , is_decoder=snake_case__ , **snake_case__ , )
@classmethod
def UpperCAmelCase_ ( cls , snake_case__ , **snake_case__ ):
'''simple docstring'''
cls._set_token_in_kwargs(snake_case__ )
lowercase__, lowercase__ : str= cls.get_config_dict(snake_case__ , **snake_case__ )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get("model_type" ) == "pix2struct":
lowercase__ : str= config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(snake_case__ , **snake_case__ )
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = "pix2struct_vision_model"
def __init__( self , snake_case__=768 , snake_case__=768 , snake_case__=2048 , snake_case__=64 , snake_case__=12 , snake_case__=12 , snake_case__="gelu_new" , snake_case__=1e-6 , snake_case__=0.0 , snake_case__=0.0 , snake_case__=1e-10 , snake_case__=1.0 , snake_case__=4096 , snake_case__=32 , snake_case__=128 , **snake_case__ , ):
'''simple docstring'''
super().__init__(**snake_case__ )
lowercase__ : Tuple= hidden_size
lowercase__ : Tuple= patch_embed_hidden_size
lowercase__ : Optional[Any]= d_ff
lowercase__ : Dict= dropout_rate
lowercase__ : Any= num_hidden_layers
lowercase__ : Optional[int]= num_attention_heads
lowercase__ : Dict= initializer_range
lowercase__ : Tuple= initializer_factor
lowercase__ : Tuple= attention_dropout
lowercase__ : Optional[Any]= layer_norm_eps
lowercase__ : List[Any]= dense_act_fn
lowercase__ : str= seq_len
lowercase__ : List[str]= relative_attention_num_buckets
lowercase__ : Union[str, Any]= relative_attention_max_distance
lowercase__ : Dict= d_kv
@classmethod
def UpperCAmelCase_ ( cls , snake_case__ , **snake_case__ ):
'''simple docstring'''
cls._set_token_in_kwargs(snake_case__ )
lowercase__, lowercase__ : int= cls.get_config_dict(snake_case__ , **snake_case__ )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get("model_type" ) == "pix2struct":
lowercase__ : Union[str, Any]= config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(snake_case__ , **snake_case__ )
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = "pix2struct"
__lowerCamelCase = True
def __init__( self , snake_case__=None , snake_case__=None , snake_case__=1.0 , snake_case__=0.02 , snake_case__=False , snake_case__=False , snake_case__=True , **snake_case__ , ):
'''simple docstring'''
super().__init__(tie_word_embeddings=snake_case__ , is_encoder_decoder=snake_case__ , **snake_case__ )
if text_config is None:
lowercase__ : List[Any]= {}
logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values." )
if vision_config is None:
lowercase__ : str= {}
logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values." )
lowercase__ : str= PixaStructTextConfig(**snake_case__ )
lowercase__ : Dict= PixaStructVisionConfig(**snake_case__ )
lowercase__ : int= self.text_config.decoder_start_token_id
lowercase__ : List[Any]= self.text_config.pad_token_id
lowercase__ : Any= self.text_config.eos_token_id
lowercase__ : Any= initializer_factor
lowercase__ : int= initializer_range
lowercase__ : List[str]= self.initializer_range
lowercase__ : List[str]= self.initializer_range
lowercase__ : Dict= is_vqa
@classmethod
def UpperCAmelCase_ ( cls , snake_case__ , snake_case__ , **snake_case__ ):
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Union[str, Any]= copy.deepcopy(self.__dict__ )
lowercase__ : str= self.text_config.to_dict()
lowercase__ : str= self.vision_config.to_dict()
lowercase__ : List[str]= self.__class__.model_type
return output
| 85 | 1 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __UpperCAmelCase:
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__=13 , snake_case__=32 , snake_case__=3 , snake_case__=4 , snake_case__=[10, 20, 30, 40] , snake_case__=[2, 2, 3, 2] , snake_case__=True , snake_case__=True , snake_case__=37 , snake_case__="gelu" , snake_case__=10 , snake_case__=0.02 , snake_case__=["stage2", "stage3", "stage4"] , snake_case__=3 , snake_case__=None , ):
'''simple docstring'''
lowercase__ : Union[str, Any]= parent
lowercase__ : Union[str, Any]= batch_size
lowercase__ : Optional[Any]= image_size
lowercase__ : Dict= num_channels
lowercase__ : Optional[int]= num_stages
lowercase__ : Optional[int]= hidden_sizes
lowercase__ : Optional[Any]= depths
lowercase__ : str= is_training
lowercase__ : Union[str, Any]= use_labels
lowercase__ : Optional[int]= intermediate_size
lowercase__ : str= hidden_act
lowercase__ : Optional[int]= type_sequence_label_size
lowercase__ : int= initializer_range
lowercase__ : int= out_features
lowercase__ : Optional[Any]= num_labels
lowercase__ : List[str]= scope
lowercase__ : List[str]= num_stages
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : int= None
if self.use_labels:
lowercase__ : Dict= ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : List[str]= self.get_config()
return config, pixel_values, labels
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return ConvNextConfig(
num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return UperNetConfig(
backbone_config=self.get_backbone_config() , hidden_size=512 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=snake_case__ , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=snake_case__ , loss_ignore_index=255 , num_labels=self.num_labels , )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
lowercase__ : Optional[int]= UperNetForSemanticSegmentation(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : Any= model(snake_case__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Any= self.prepare_config_and_inputs()
(
(
lowercase__
), (
lowercase__
), (
lowercase__
),
) : Dict= config_and_inputs
lowercase__ : Optional[int]= {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
__lowerCamelCase = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
__lowerCamelCase = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
__lowerCamelCase = False
__lowerCamelCase = False
__lowerCamelCase = False
__lowerCamelCase = False
__lowerCamelCase = False
__lowerCamelCase = False
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[int]= UperNetModelTester(self )
lowercase__ : str= ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ , hidden_size=37 )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__, lowercase__ : List[str]= self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Union[str, Any]= model_class(snake_case__ )
lowercase__ : Tuple= inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : List[str]= [*signature.parameters.keys()]
lowercase__ : Union[str, Any]= ["pixel_values"]
self.assertListEqual(arg_names[:1] , snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Union[str, Any]= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*snake_case__ )
@unittest.skip(reason="UperNet does not use inputs_embeds" )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason="UperNet does not support input and output embeddings" )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason="UperNet does not have a base model" )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason="UperNet does not have a base model" )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
pass
def UpperCAmelCase_ ( self ):
'''simple docstring'''
def check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ ):
lowercase__ : List[str]= model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
with torch.no_grad():
lowercase__ : Any= model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
lowercase__ : int= outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase__ : str= self.model_tester.num_stages
self.assertEqual(len(snake_case__ ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowercase__, lowercase__ : Optional[int]= self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[Any]= True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : Any= True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__, lowercase__ : Tuple= self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : int= _config_zero_init(snake_case__ )
lowercase__ : Optional[Any]= _config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
lowercase__ : Union[str, Any]= model_class(config=snake_case__ )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip(reason="UperNet does not have tied weights" )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
pass
@slow
def UpperCAmelCase_ ( self ):
'''simple docstring'''
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : int= UperNetForSemanticSegmentation.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def lowercase__() ->Tuple:
"""simple docstring"""
lowercase__ : int= hf_hub_download(
repo_id="hf-internal-testing/fixtures_ade20k" , repo_type="dataset" , filename="ADE_val_00000001.jpg" )
lowercase__ : List[str]= Image.open(A ).convert("RGB" )
return image
@require_torch
@require_vision
@slow
class __UpperCAmelCase( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Any= AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny" )
lowercase__ : Tuple= UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny" ).to(snake_case__ )
lowercase__ : Dict= prepare_img()
lowercase__ : int= processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ )
with torch.no_grad():
lowercase__ : Optional[int]= model(**snake_case__ )
lowercase__ : Optional[int]= torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape , snake_case__ )
lowercase__ : Any= torch.tensor(
[[-7.59_58, -7.59_58, -7.43_02], [-7.59_58, -7.59_58, -7.43_02], [-7.47_97, -7.47_97, -7.30_68]] ).to(snake_case__ )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , snake_case__ , atol=1e-4 ) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Tuple= AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny" )
lowercase__ : Optional[Any]= UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny" ).to(snake_case__ )
lowercase__ : str= prepare_img()
lowercase__ : List[Any]= processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ )
with torch.no_grad():
lowercase__ : Optional[Any]= model(**snake_case__ )
lowercase__ : List[Any]= torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape , snake_case__ )
lowercase__ : Optional[int]= torch.tensor(
[[-8.81_10, -8.81_10, -8.65_21], [-8.81_10, -8.81_10, -8.65_21], [-8.77_46, -8.77_46, -8.61_30]] ).to(snake_case__ )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , snake_case__ , atol=1e-4 ) )
| 85 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class __UpperCAmelCase( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : str= TFAutoModelForSeqaSeqLM.from_pretrained("google/mt5-small" )
lowercase__ : str= AutoTokenizer.from_pretrained("google/mt5-small" )
lowercase__ : Tuple= tokenizer("Hello there" , return_tensors="tf" ).input_ids
lowercase__ : Optional[Any]= tokenizer("Hi I am" , return_tensors="tf" ).input_ids
lowercase__ : Optional[Any]= model(snake_case__ , labels=snake_case__ ).loss
lowercase__ : int= -tf.math.reduce_mean(snake_case__ ).numpy()
lowercase__ : int= -21.22_81_68
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2e-4 )
| 85 | 1 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def lowercase__(A ) ->Optional[int]:
"""simple docstring"""
lowercase__ : str= [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"decoder.output_projection.weight",
"_float_tensor",
"encoder.embed_positions._float_tensor",
"decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
state_dict.pop(A , A )
def lowercase__(A ) ->int:
"""simple docstring"""
lowercase__, lowercase__ : Optional[int]= emb.weight.shape
lowercase__ : List[Any]= nn.Linear(A , A , bias=A )
lowercase__ : Optional[int]= emb.weight.data
return lin_layer
def lowercase__(A ) ->Dict:
"""simple docstring"""
lowercase__ : str= torch.load(A , map_location="cpu" )
lowercase__ : int= mam_aaa["args"] or mam_aaa["cfg"]["model"]
lowercase__ : Dict= mam_aaa["model"]
remove_ignore_keys_(A )
lowercase__ : Union[str, Any]= state_dict["encoder.embed_tokens.weight"].shape[0]
lowercase__ : str= MaMaaaConfig(
vocab_size=A , max_position_embeddings=1_024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="relu" , )
lowercase__ : str= state_dict["decoder.embed_tokens.weight"]
lowercase__ : List[Any]= MaMaaaForConditionalGeneration(A )
model.model.load_state_dict(A , strict=A )
lowercase__ : Dict= make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
a : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
a : Any = parser.parse_args()
a : List[str] = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 85 |
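The conversion above turns the decoder's embedding matrix into the output projection. A minimal illustration of that weight-sharing trick, with illustrative (tiny) shapes:

import torch
from torch import nn

emb = nn.Embedding(num_embeddings=10, embedding_dim=4)
lm_head = nn.Linear(4, 10, bias=False)
lm_head.weight.data = emb.weight.data  # the LM head reuses the embedding weights

token_ids = torch.tensor([1, 3])
hidden = emb(token_ids)      # (2, 4) "hidden states"
logits = lm_head(hidden)     # (2, 10) scores over the 10-token vocabulary
assert logits.shape == (2, 10)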
"""simple docstring"""
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = ["image_processor", "tokenizer"]
__lowerCamelCase = "BridgeTowerImageProcessor"
__lowerCamelCase = ("RobertaTokenizer", "RobertaTokenizerFast")
def __init__( self , snake_case__ , snake_case__ ):
'''simple docstring'''
super().__init__(snake_case__ , snake_case__ )
def __call__( self , snake_case__ , snake_case__ = None , snake_case__ = True , snake_case__ = False , snake_case__ = None , snake_case__ = None , snake_case__ = 0 , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = False , snake_case__ = False , snake_case__ = False , snake_case__ = False , snake_case__ = True , snake_case__ = None , **snake_case__ , ):
'''simple docstring'''
lowercase__ : Optional[int]= self.tokenizer(
text=snake_case__ , add_special_tokens=snake_case__ , padding=snake_case__ , truncation=snake_case__ , max_length=snake_case__ , stride=snake_case__ , pad_to_multiple_of=snake_case__ , return_token_type_ids=snake_case__ , return_attention_mask=snake_case__ , return_overflowing_tokens=snake_case__ , return_special_tokens_mask=snake_case__ , return_offsets_mapping=snake_case__ , return_length=snake_case__ , verbose=snake_case__ , return_tensors=snake_case__ , **snake_case__ , )
# add pixel_values + pixel_mask
lowercase__ : Optional[int]= self.image_processor(
snake_case__ , return_tensors=snake_case__ , do_normalize=snake_case__ , do_center_crop=snake_case__ , **snake_case__ )
encoding.update(snake_case__ )
return encoding
def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ )
def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
return self.tokenizer.decode(*snake_case__ , **snake_case__ )
@property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.tokenizer.model_input_names
lowercase__ : List[Any]= self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 85 | 1 |
"""simple docstring"""
import qiskit
def lowercase__(A = 2 ) ->qiskit.result.counts.Counts:
"""simple docstring"""
lowercase__ : Any= qubits
# Using Aer's simulator
lowercase__ : Any= qiskit.Aer.get_backend("aer_simulator" )
# Creating a Quantum Circuit acting on the q register
lowercase__ : Any= qiskit.QuantumCircuit(A , A )
# Adding an H gate on qubit 0 (now q0 is in superposition)
circuit.h(0 )
for i in range(1 , A ):
# Adding CX (CNOT) gate
circuit.cx(i - 1 , A )
# Mapping the quantum measurement to the classical bits
circuit.measure(list(range(A ) ) , list(range(A ) ) )
# Measuring any one qubit now collapses the superposition of the
# other qubits, leaving them in the same state as the measured one.
# Executing the circuit on the simulator
lowercase__ : Union[str, Any]= qiskit.execute(A , A , shots=1_000 )
return job.result().get_counts(A )
if __name__ == "__main__":
print(F"""Total count for various states are: {quantum_entanglement(3)}""")
| 85 |
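A brief usage note on the circuit above: for 3 qubits it prepares a GHZ state, so only the all-zeros and all-ones bitstrings appear among the measured counts (up to sampling noise in their frequencies). The sketch below mirrors the record's own qiskit calls and assumes an Aer-enabled qiskit install; the helper name is illustrative.

import qiskit

def ghz_counts(qubits: int = 3, shots: int = 1_000) -> dict:
    backend = qiskit.Aer.get_backend("aer_simulator")
    circuit = qiskit.QuantumCircuit(qubits, qubits)
    circuit.h(0)                   # put qubit 0 into superposition
    for i in range(1, qubits):
        circuit.cx(i - 1, i)       # entangle each qubit with the previous one
    circuit.measure(list(range(qubits)), list(range(qubits)))
    job = qiskit.execute(circuit, backend, shots=shots)
    return job.result().get_counts(circuit)

counts = ghz_counts(3)
assert set(counts) <= {"000", "111"}   # only the fully correlated outcomes occur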
"""simple docstring"""
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= tempfile.mkdtemp()
lowercase__ : Optional[Any]= 8
# DPR tok
lowercase__ : Tuple= [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
lowercase__ : Any= os.path.join(self.tmpdirname , "dpr_tokenizer" )
os.makedirs(snake_case__ , exist_ok=snake_case__ )
lowercase__ : Any= os.path.join(snake_case__ , DPR_VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
# BART tok
lowercase__ : List[Any]= [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
lowercase__ : Tuple= dict(zip(snake_case__ , range(len(snake_case__ ) ) ) )
lowercase__ : Any= ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
lowercase__ : Tuple= {"unk_token": "<unk>"}
lowercase__ : int= os.path.join(self.tmpdirname , "bart_tokenizer" )
os.makedirs(snake_case__ , exist_ok=snake_case__ )
lowercase__ : List[str]= os.path.join(snake_case__ , BART_VOCAB_FILES_NAMES["vocab_file"] )
lowercase__ : str= os.path.join(snake_case__ , BART_VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(snake_case__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(snake_case__ ) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , "bart_tokenizer" ) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= Dataset.from_dict(
{
"id": ["0", "1"],
"text": ["foo", "bar"],
"title": ["Foo", "Bar"],
"embeddings": [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Union[str, Any]= self.get_dummy_dataset()
lowercase__ : Optional[Any]= RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset:
lowercase__ : Tuple= dataset
lowercase__ : Optional[int]= RagRetriever(
snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
lowercase__ : Dict= self.get_dummy_dataset()
lowercase__ : Tuple= RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="custom" , )
if from_disk:
lowercase__ : Tuple= os.path.join(self.tmpdirname , "dataset" )
lowercase__ : Optional[Any]= os.path.join(self.tmpdirname , "index.faiss" )
dataset.get_index("embeddings" ).save(os.path.join(self.tmpdirname , "index.faiss" ) )
dataset.drop_index("embeddings" )
dataset.save_to_disk(os.path.join(self.tmpdirname , "dataset" ) )
del dataset
lowercase__ : List[Any]= RagRetriever(
snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
lowercase__ : Optional[int]= RagRetriever(
snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , snake_case__ ) , )
return retriever
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[str]= Dataset.from_dict(
{
"id": ["0", "1"],
"text": ["foo", "bar"],
"title": ["Foo", "Bar"],
"embeddings": [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT )
lowercase__ : Optional[int]= os.path.join(self.tmpdirname , "hf_bert_base.hnswSQ8_correct_phi_128.c_index" )
dataset.save_faiss_index("embeddings" , index_file_name + ".index.dpr" )
pickle.dump(dataset["id"] , open(index_file_name + ".index_meta.dpr" , "wb" ) )
lowercase__ : int= os.path.join(self.tmpdirname , "psgs_w100.tsv.pkl" )
lowercase__ : str= {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
pickle.dump(snake_case__ , open(snake_case__ , "wb" ) )
lowercase__ : List[Any]= RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="legacy" , index_path=self.tmpdirname , )
lowercase__ : Optional[Any]= RagRetriever(
snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= 1
lowercase__ : Optional[Any]= self.get_dummy_canonical_hf_index_retriever()
lowercase__ : Union[str, Any]= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__, lowercase__, lowercase__ : Optional[int]= retriever.retrieve(snake_case__ , n_docs=snake_case__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(snake_case__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , snake_case__ )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset:
lowercase__ : Tuple= self.get_dummy_dataset()
retriever.save_pretrained(snake_case__ )
lowercase__ : int= RagRetriever.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
lowercase__ : Any= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : Tuple= retriever.retrieve(snake_case__ , n_docs=1 )
self.assertTrue(out is not None )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[Any]= 1
lowercase__ : Any= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ )
lowercase__ : Union[str, Any]= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__, lowercase__, lowercase__ : Any= retriever.retrieve(snake_case__ , n_docs=snake_case__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(snake_case__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , snake_case__ )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(snake_case__ )
lowercase__ : int= RagRetriever.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
lowercase__ : Tuple= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : str= retriever.retrieve(snake_case__ , n_docs=1 )
self.assertTrue(out is not None )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Any= 1
lowercase__ : str= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ )
lowercase__ : List[str]= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__, lowercase__, lowercase__ : Optional[int]= retriever.retrieve(snake_case__ , n_docs=snake_case__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(snake_case__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , snake_case__ )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Tuple= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(snake_case__ )
lowercase__ : Optional[Any]= RagRetriever.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
lowercase__ : int= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : Union[str, Any]= retriever.retrieve(snake_case__ , n_docs=1 )
self.assertTrue(out is not None )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= 1
lowercase__ : int= self.get_dummy_legacy_index_retriever()
lowercase__ : Optional[Any]= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__, lowercase__, lowercase__ : Optional[Any]= retriever.retrieve(snake_case__ , n_docs=snake_case__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(snake_case__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["text", "title"] )
self.assertEqual(len(doc_dicts[0]["text"] ) , snake_case__ )
self.assertEqual(doc_dicts[0]["text"][0] , "bar" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["text"][0] , "foo" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[int]= self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(snake_case__ )
lowercase__ : List[Any]= RagRetriever.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
lowercase__ : str= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : Tuple= retriever.retrieve(snake_case__ , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def UpperCAmelCase_ ( self ):
'''simple docstring'''
import torch
lowercase__ : str= 1
lowercase__ : Union[str, Any]= self.get_dummy_canonical_hf_index_retriever()
lowercase__ : str= [[5, 7], [10, 11]]
lowercase__ : List[str]= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : Dict= retriever(snake_case__ , snake_case__ , prefix=retriever.config.generator.prefix , n_docs=snake_case__ )
lowercase__, lowercase__, lowercase__ : Optional[int]= (
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(snake_case__ , snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
self.assertIsInstance(snake_case__ , np.ndarray )
lowercase__ : Any= retriever(
snake_case__ , snake_case__ , prefix=retriever.config.generator.prefix , n_docs=snake_case__ , return_tensors="pt" , )
lowercase__, lowercase__, lowercase__, lowercase__ : Tuple= ( # noqa: F841
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
out["doc_ids"],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(snake_case__ , torch.Tensor )
self.assertIsInstance(snake_case__ , torch.Tensor )
self.assertIsInstance(snake_case__ , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[str]= self.get_dpr_ctx_encoder_tokenizer()
lowercase__ : Dict= 1
lowercase__ : Any= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ )
retriever.set_ctx_encoder_tokenizer(snake_case__ )
lowercase__ : List[str]= [[5, 7], [10, 11]]
lowercase__ : Any= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : List[Any]= retriever(snake_case__ , snake_case__ , prefix=retriever.config.generator.prefix , n_docs=snake_case__ )
self.assertEqual(
len(snake_case__ ) , 6 ) # check whether the retriever output consists of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask") ) , snake_case__ ) # check for doc token related keys in dictionary.
| 85 | 1 |
"""simple docstring"""
a : Optional[Any] = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
a : str = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
a : Union[str, Any] = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 85 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = ["image_processor", "tokenizer"]
__lowerCamelCase = "AutoImageProcessor"
__lowerCamelCase = "AutoTokenizer"
def __init__( self , snake_case__ , snake_case__ ):
'''simple docstring'''
super().__init__(snake_case__ , snake_case__ )
lowercase__ : List[Any]= self.image_processor
def __call__( self , snake_case__=None , snake_case__=None , snake_case__=None , **snake_case__ ):
'''simple docstring'''
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
lowercase__ : Tuple= self.tokenizer(snake_case__ , return_tensors=snake_case__ , **snake_case__ )
if images is not None:
lowercase__ : str= self.image_processor(snake_case__ , return_tensors=snake_case__ , **snake_case__ )
if text is not None and images is not None:
lowercase__ : Any= image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**snake_case__ ) , tensor_type=snake_case__ )
def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ )
def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
return self.tokenizer.decode(*snake_case__ , **snake_case__ )
@property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return ["input_ids", "attention_mask", "pixel_values"]
| 85 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : int = logging.get_logger(__name__)
a : int = {
"""studio-ousia/luke-base""": """https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json""",
"""studio-ousia/luke-large""": """https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json""",
}
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = "luke"
def __init__( self , snake_case__=50267 , snake_case__=500000 , snake_case__=768 , snake_case__=256 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=2 , snake_case__=0.02 , snake_case__=1e-12 , snake_case__=True , snake_case__=None , snake_case__=1 , snake_case__=0 , snake_case__=2 , **snake_case__ , ):
'''simple docstring'''
super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
lowercase__ : str= vocab_size
lowercase__ : Optional[Any]= entity_vocab_size
lowercase__ : Tuple= hidden_size
lowercase__ : Optional[Any]= entity_emb_size
lowercase__ : List[Any]= num_hidden_layers
lowercase__ : Optional[int]= num_attention_heads
lowercase__ : Optional[Any]= hidden_act
lowercase__ : Union[str, Any]= intermediate_size
lowercase__ : Union[str, Any]= hidden_dropout_prob
lowercase__ : Optional[Any]= attention_probs_dropout_prob
lowercase__ : Any= max_position_embeddings
lowercase__ : int= type_vocab_size
lowercase__ : Dict= initializer_range
lowercase__ : Tuple= layer_norm_eps
lowercase__ : int= use_entity_aware_attention
lowercase__ : Optional[Any]= classifier_dropout
| 85 |
"""simple docstring"""
a : List[Any] = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"""
def lowercase__(A ) ->bytes:
"""simple docstring"""
if not isinstance(A , A ):
lowercase__ : Union[str, Any]= f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(A )
lowercase__ : str= "".join(bin(A )[2:].zfill(8 ) for byte in data )
lowercase__ : Tuple= len(A ) % 6 != 0
if padding_needed:
# The padding that will be added later
lowercase__ : Union[str, Any]= b"=" * ((6 - len(A ) % 6) // 2)
# Pad binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(A ) % 6)
else:
lowercase__ : str= b""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(A ) , 6 ) ).encode()
+ padding
)
def lowercase__(A ) ->bytes:
"""simple docstring"""
if not isinstance(A , A ) and not isinstance(A , A ):
lowercase__ : str= (
"argument should be a bytes-like object or ASCII string, "
f'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(A )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(A , A ):
try:
lowercase__ : Optional[Any]= encoded_data.decode("utf-8" )
except UnicodeDecodeError:
raise ValueError("base64 encoded data should only contain ASCII characters" )
lowercase__ : List[Any]= encoded_data.count("=" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(A ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
lowercase__ : str= encoded_data[:-padding]
lowercase__ : Tuple= "".join(
bin(B64_CHARSET.index(A ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
lowercase__ : Tuple= "".join(
bin(B64_CHARSET.index(A ) )[2:].zfill(6 ) for char in encoded_data )
lowercase__ : Any= [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(A ) , 8 )
]
return bytes(A )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 85 | 1 |
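A short worked example of the 6-bit regrouping the encoder above performs, cross-checked against the standard library (the intermediate variables are purely illustrative).

import base64

data = b"Ma"  # 2 bytes = 16 bits
bits = "".join(bin(byte)[2:].zfill(8) for byte in data)
assert bits == "0100110101100001"

# 16 is not a multiple of 6: pad with two zero bits and emit one '=' for the missing byte.
padded = bits + "0" * (6 - len(bits) % 6)
groups = [padded[i : i + 6] for i in range(0, len(padded), 6)]
assert groups == ["010011", "010110", "000100"]  # indices 19, 22, 4 -> 'T', 'W', 'E'

assert base64.b64encode(data) == b"TWE="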
"""simple docstring"""
from ....utils import logging
a : List[str] = logging.get_logger(__name__)
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__=None , snake_case__=2048 ):
'''simple docstring'''
lowercase__ : Dict= config.__dict__
lowercase__ : str= modal_hidden_size
if num_labels:
lowercase__ : List[str]= num_labels
| 85 |
"""simple docstring"""
from __future__ import annotations
def lowercase__(A ) ->list[int]: # This function is recursive
"""simple docstring"""
lowercase__ : int= len(A )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
lowercase__ : str= array[0]
lowercase__ : Optional[Any]= False
lowercase__ : Any= 1
lowercase__ : list[int]= []
while not is_found and i < array_length:
if array[i] < pivot:
lowercase__ : Union[str, Any]= True
lowercase__ : List[str]= [element for element in array[i:] if element >= array[i]]
lowercase__ : Union[str, Any]= longest_subsequence(A )
if len(A ) > len(A ):
lowercase__ : List[str]= temp_array
else:
i += 1
lowercase__ : List[str]= [element for element in array[1:] if element >= pivot]
lowercase__ : List[str]= [pivot, *longest_subsequence(A )]
if len(A ) > len(A ):
return temp_array
else:
return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
| 85 | 1 |
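For comparison, a hedged sketch of the classic patience-sorting approach to the same longest non-decreasing subsequence problem; it is a different technique from the recursive search above and returns only the length, not the subsequence itself.

from bisect import bisect_right

def longest_non_decreasing_length(array: list[int]) -> int:
    # tails[k] is the smallest possible tail of a non-decreasing subsequence of length k + 1.
    tails: list[int] = []
    for value in array:
        pos = bisect_right(tails, value)  # bisect_right allows repeated values
        if pos == len(tails):
            tails.append(value)
        else:
            tails[pos] = value
    return len(tails)

assert longest_non_decreasing_length([2, 2, 1, 3]) == 3  # e.g. [2, 2, 3]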
"""simple docstring"""
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
a : int = """\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
"""
a : Any = """\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metrics is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""
a : Optional[Any] = """
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large'. Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: \"c\" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric('mauve')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCAmelCase( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/krishnap25/mauve" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/krishnap25/mauve"] , reference_urls=[
"https://arxiv.org/abs/2102.01454",
"https://github.com/krishnap25/mauve",
] , )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__="auto" , snake_case__=-1 , snake_case__=0.9 , snake_case__=5 , snake_case__=500 , snake_case__="gpt2-large" , snake_case__=-1 , snake_case__=1024 , snake_case__=25 , snake_case__=5 , snake_case__=True , snake_case__=25 , ):
'''simple docstring'''
lowercase__ : str= compute_mauve(
p_text=snake_case__ , q_text=snake_case__ , p_features=snake_case__ , q_features=snake_case__ , p_tokens=snake_case__ , q_tokens=snake_case__ , num_buckets=snake_case__ , pca_max_data=snake_case__ , kmeans_explained_var=snake_case__ , kmeans_num_redo=snake_case__ , kmeans_max_iter=snake_case__ , featurize_model_name=snake_case__ , device_id=snake_case__ , max_text_length=snake_case__ , divergence_curve_discretization_size=snake_case__ , mauve_scaling_factor=snake_case__ , verbose=snake_case__ , seed=snake_case__ , )
return out
| 85 |
"""simple docstring"""
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
a : int = argparse.ArgumentParser()
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--txt2img_unclip""",
default="""kakaobrain/karlo-v1-alpha""",
type=str,
required=False,
help="""The pretrained txt2img unclip.""",
)
a : List[str] = parser.parse_args()
a : List[str] = UnCLIPPipeline.from_pretrained(args.txtaimg_unclip)
a : Optional[Any] = CLIPImageProcessor()
a : List[str] = CLIPVisionModelWithProjection.from_pretrained("""openai/clip-vit-large-patch14""")
a : Tuple = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
| 85 | 1 |
"""simple docstring"""
def lowercase__(A = 200 ) ->int:
"""simple docstring"""
lowercase__ : Union[str, Any]= [1, 2, 5, 10, 20, 50, 100, 200]
lowercase__ : int= [0] * (pence + 1)
lowercase__ : Optional[int]= 1 # base case: 1 way to make 0 pence
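# Iterate coins in the outer loop and amounts in the inner loop (classic coin-change DP),
# so each combination of coins is counted exactly once, regardless of order.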
for coin in coins:
for i in range(A , pence + 1 , 1 ):
number_of_ways[i] += number_of_ways[i - coin]
return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 73682
| 85 |
"""simple docstring"""
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
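# Maps each model type to its config class, TF model class(es), PyTorch model class(es)
# and pretrained archive map(s); a few entries (e.g. bart, dpr) carry extra task heads.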
a : Optional[Any] = {
"""bart""": (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""bert""": (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-base-cased-finetuned-mrpc""": (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""dpr""": (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""gpt2""": (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlnet""": (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlm""": (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlm-roberta""": (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""transfo-xl""": (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""openai-gpt""": (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""roberta""": (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""layoutlm""": (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""roberta-large-mnli""": (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""camembert""": (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""flaubert""": (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""distilbert""": (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""distilbert-base-distilled-squad""": (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""lxmert""": (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""lxmert-visual-feature-encoder""": (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""ctrl""": (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""albert""": (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""t5""": (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""electra""": (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""wav2vec2""": (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def lowercase__(A , A , A , A , A=False , A=True ) ->Union[str, Any]:
"""simple docstring"""
if model_type not in MODEL_CLASSES:
raise ValueError(f'''Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.''' )
lowercase__, lowercase__, lowercase__, lowercase__ : List[Any]= MODEL_CLASSES[model_type]
# Initialise TF model
if config_file in aws_config_map:
lowercase__ : List[str]= cached_file(A , A , force_download=not use_cached_models )
lowercase__ : List[Any]= config_class.from_json_file(A )
lowercase__ : Any= True
lowercase__ : List[str]= True
print(f'''Building TensorFlow model from configuration: {config}''' )
lowercase__ : Optional[int]= model_class(A )
# Load weights from tf checkpoint
if pytorch_checkpoint_path in aws_config_map.keys():
lowercase__ : List[str]= cached_file(
A , A , force_download=not use_cached_models )
# Load PyTorch checkpoint in tf2 model:
lowercase__ : Union[str, Any]= load_pytorch_checkpoint_in_tfa_model(A , A )
if compare_with_pt_model:
lowercase__ : Any= tf_model(tf_model.dummy_inputs , training=A ) # build the network
lowercase__ : Optional[Any]= torch.load(A , map_location="cpu" )
lowercase__ : Union[str, Any]= pt_model_class.from_pretrained(
pretrained_model_name_or_path=A , config=A , state_dict=A )
with torch.no_grad():
lowercase__ : str= pt_model(**pt_model.dummy_inputs )
lowercase__ : Tuple= pto[0].numpy()
lowercase__ : List[Any]= tfo[0].numpy()
lowercase__ : Any= np.amax(np.abs(np_pt - np_tf ) )
print(f'''Max absolute difference between models outputs {diff}''' )
assert diff <= 2e-2, f'''Error, model absolute difference is >2e-2: {diff}'''
# Save pytorch-model
print(f'''Save TensorFlow model to {tf_dump_path}''' )
tf_model.save_weights(A , save_format="h5" )
def lowercase__(A , A , A=None , A=None , A=False , A=False , A=False , A=False , ) ->List[Any]:
"""simple docstring"""
if args_model_type is None:
lowercase__ : Tuple= list(MODEL_CLASSES.keys() )
else:
lowercase__ : Optional[int]= [args_model_type]
for j, model_type in enumerate(A , start=1 ):
print("=" * 100 )
print(f''' Converting model type {j}/{len(A )}: {model_type}''' )
print("=" * 100 )
if model_type not in MODEL_CLASSES:
raise ValueError(f'''Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.''' )
lowercase__, lowercase__, lowercase__, lowercase__, lowercase__ : Optional[int]= MODEL_CLASSES[model_type]
if model_shortcut_names_or_path is None:
lowercase__ : int= list(aws_model_maps.keys() )
if config_shortcut_names_or_path is None:
lowercase__ : Any= model_shortcut_names_or_path
for i, (model_shortcut_name, config_shortcut_name) in enumerate(
zip(A , A ) , start=1 ):
print("-" * 100 )
if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
if not only_convert_finetuned_models:
print(f''' Skipping finetuned checkpoint {model_shortcut_name}''' )
continue
lowercase__ : Any= model_shortcut_name
elif only_convert_finetuned_models:
print(f''' Skipping non-finetuned checkpoint {model_shortcut_name}''' )
continue
print(
f''' Converting checkpoint {i}/{len(A )}: {model_shortcut_name} - model_type {model_type}''' )
print("-" * 100 )
if config_shortcut_name in aws_config_map:
lowercase__ : List[str]= cached_file(A , A , force_download=not use_cached_models )
else:
lowercase__ : Union[str, Any]= config_shortcut_name
if model_shortcut_name in aws_model_maps:
lowercase__ : str= cached_file(A , A , force_download=not use_cached_models )
else:
lowercase__ : Any= model_shortcut_name
if os.path.isfile(A ):
lowercase__ : Dict= "converted_model"
convert_pt_checkpoint_to_tf(
model_type=A , pytorch_checkpoint_path=A , config_file=A , tf_dump_path=os.path.join(A , model_shortcut_name + "-tf_model.h5" ) , compare_with_pt_model=A , )
if remove_cached_files:
os.remove(A )
os.remove(A )
if __name__ == "__main__":
a : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_dump_path""", default=None, type=str, required=True, help="""Path to the output Tensorflow dump file."""
)
parser.add_argument(
"""--model_type""",
default=None,
type=str,
help=(
F"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """
"""convert all the models from AWS."""
),
)
parser.add_argument(
"""--pytorch_checkpoint_path""",
default=None,
type=str,
help=(
"""Path to the PyTorch checkpoint path or shortcut name to download from AWS. """
"""If not given, will download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
help=(
"""The config json file corresponding to the pre-trained model. \n"""
"""This specifies the model architecture. If not given and """
"""--pytorch_checkpoint_path is not given or is a shortcut name """
"""use the configuration associated to the shortcut name on the AWS"""
),
)
parser.add_argument(
"""--compare_with_pt_model""", action="""store_true""", help="""Compare Tensorflow and PyTorch model predictions."""
)
parser.add_argument(
"""--use_cached_models""",
action="""store_true""",
help="""Use cached models if possible instead of updating to latest checkpoint versions.""",
)
parser.add_argument(
"""--remove_cached_files""",
action="""store_true""",
help="""Remove pytorch models after conversion (save memory when converting in batches).""",
)
parser.add_argument("""--only_convert_finetuned_models""", action="""store_true""", help="""Only convert finetuned models.""")
a : List[str] = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
| 85 | 1 |
"""simple docstring"""
from __future__ import annotations
import bisect
def lowercase__(A , A , A = 0 , A = -1 ) ->int:
"""simple docstring"""
if hi < 0:
lowercase__ : Optional[int]= len(A )
while lo < hi:
lowercase__ : Optional[Any]= lo + (hi - lo) // 2
if sorted_collection[mid] < item:
lowercase__ : Optional[Any]= mid + 1
else:
lowercase__ : Optional[Any]= mid
return lo
def lowercase__(A , A , A = 0 , A = -1 ) ->int:
"""simple docstring"""
if hi < 0:
lowercase__ : str= len(A )
while lo < hi:
lowercase__ : Dict= lo + (hi - lo) // 2
if sorted_collection[mid] <= item:
lowercase__ : Optional[int]= mid + 1
else:
lowercase__ : Optional[Any]= mid
return lo
def lowercase__(A , A , A = 0 , A = -1 ) ->None:
"""simple docstring"""
sorted_collection.insert(bisect_left(A , A , A , A ) , A )
def lowercase__(A , A , A = 0 , A = -1 ) ->None:
"""simple docstring"""
sorted_collection.insert(bisect_right(A , A , A , A ) , A )
def lowercase__(A , A ) ->int | None:
"""simple docstring"""
lowercase__ : Any= 0
lowercase__ : Optional[int]= len(A ) - 1
while left <= right:
lowercase__ : str= left + (right - left) // 2
lowercase__ : Optional[Any]= sorted_collection[midpoint]
if current_item == item:
return midpoint
elif item < current_item:
lowercase__ : Optional[Any]= midpoint - 1
else:
lowercase__ : Tuple= midpoint + 1
return None
def lowercase__(A , A ) ->int | None:
"""simple docstring"""
lowercase__ : Any= bisect.bisect_left(A , A )
if index != len(A ) and sorted_collection[index] == item:
return index
return None
def lowercase__(A , A , A , A ) ->int | None:
"""simple docstring"""
if right < left:
return None
lowercase__ : Any= left + (right - left) // 2
if sorted_collection[midpoint] == item:
return midpoint
elif sorted_collection[midpoint] > item:
return binary_search_by_recursion(A , A , A , midpoint - 1 )
else:
return binary_search_by_recursion(A , A , midpoint + 1 , A )
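# Quick sanity example (using the conceptual function names, since identifiers here are renamed):
# for sorted_collection = [0, 5, 7, 10, 15] and item = 7, the left-bisect variant returns 2,
# the right-bisect variant returns 3, and the iterative/recursive binary searches return index 2.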
if __name__ == "__main__":
a : Optional[Any] = input("""Enter numbers separated by comma:\n""").strip()
a : Optional[int] = sorted(int(item) for item in user_input.split(""","""))
a : Union[str, Any] = int(input("""Enter a single number to be found in the list:\n"""))
a : Optional[int] = binary_search(collection, target)
if result is None:
print(F"""{target} was not found in {collection}.""")
else:
print(F"""{target} was found at position {result} in {collection}.""")
| 85 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
a : List[str] = {"""processing_wav2vec2_with_lm""": ["""Wav2Vec2ProcessorWithLM"""]}
if TYPE_CHECKING:
from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
import sys
a : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 85 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : Optional[Any] = logging.get_logger(__name__)
a : str = {
"""microsoft/markuplm-base""": """https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json""",
"""microsoft/markuplm-large""": """https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json""",
}
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = "markuplm"
def __init__( self , snake_case__=30522 , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=2 , snake_case__=0.02 , snake_case__=1e-12 , snake_case__=0 , snake_case__=0 , snake_case__=2 , snake_case__=256 , snake_case__=1024 , snake_case__=216 , snake_case__=1001 , snake_case__=32 , snake_case__=50 , snake_case__="absolute" , snake_case__=True , snake_case__=None , **snake_case__ , ):
'''simple docstring'''
super().__init__(
pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ , )
lowercase__ : Optional[Any]= vocab_size
lowercase__ : Any= hidden_size
lowercase__ : Dict= num_hidden_layers
lowercase__ : str= num_attention_heads
lowercase__ : Any= hidden_act
lowercase__ : Union[str, Any]= intermediate_size
lowercase__ : int= hidden_dropout_prob
lowercase__ : Optional[Any]= attention_probs_dropout_prob
lowercase__ : List[str]= max_position_embeddings
lowercase__ : Tuple= type_vocab_size
lowercase__ : Tuple= initializer_range
lowercase__ : Optional[int]= layer_norm_eps
lowercase__ : List[Any]= position_embedding_type
lowercase__ : Optional[Any]= use_cache
lowercase__ : Tuple= classifier_dropout
# additional properties
lowercase__ : Union[str, Any]= max_depth
lowercase__ : int= max_xpath_tag_unit_embeddings
lowercase__ : Optional[Any]= max_xpath_subs_unit_embeddings
lowercase__ : Any= tag_pad_id
lowercase__ : str= subs_pad_id
lowercase__ : Union[str, Any]= xpath_unit_hidden_size
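# Hypothetical usage sketch, assuming the upstream MarkupLMConfig / MarkupLMModel names
# (the class in this file is exposed under a renamed identifier):
#   config = MarkupLMConfig(max_depth=50, xpath_unit_hidden_size=32)
#   model = MarkupLMModel(config)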
| 85 |
"""simple docstring"""
def lowercase__(A ) ->list:
"""simple docstring"""
if n_term == "":
return []
lowercase__ : list= []
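# e.g. a term count of 5 yields ['1', '1/2', '1/3', '1/4', '1/5']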
for temp in range(int(A ) ):
series.append(f'''1/{temp + 1}''' if series else "1" )
return series
if __name__ == "__main__":
a : Dict = input("""Enter the last number (nth term) of the Harmonic Series""")
print("""Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n""")
print(harmonic_series(nth_term))
| 85 | 1 |
"""simple docstring"""
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
a : Optional[int] = logging.get_logger(__name__)
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = ["pixel_values"]
def __init__( self , snake_case__ = True , snake_case__ = 1 / 255 , snake_case__ = True , snake_case__ = 8 , **snake_case__ , ):
'''simple docstring'''
super().__init__(**snake_case__ )
lowercase__ : Optional[Any]= do_rescale
lowercase__ : Optional[int]= rescale_factor
lowercase__ : Dict= do_pad
lowercase__ : Dict= pad_size
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ = None , **snake_case__ ):
'''simple docstring'''
return rescale(snake_case__ , scale=snake_case__ , data_format=snake_case__ , **snake_case__ )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ = None ):
'''simple docstring'''
lowercase__, lowercase__ : List[str]= get_image_size(snake_case__ )
lowercase__ : int= (old_height // size + 1) * size - old_height
lowercase__ : int= (old_width // size + 1) * size - old_width
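# Pad the bottom/right edges up to the next multiple of `size` with symmetric padding
# (note: a full extra block is added even when the dimension is already divisible).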
return pad(snake_case__ , ((0, pad_height), (0, pad_width)) , mode="symmetric" , data_format=snake_case__ )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = ChannelDimension.FIRST , **snake_case__ , ):
'''simple docstring'''
lowercase__ : Dict= do_rescale if do_rescale is not None else self.do_rescale
lowercase__ : str= rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase__ : Optional[int]= do_pad if do_pad is not None else self.do_pad
lowercase__ : int= pad_size if pad_size is not None else self.pad_size
lowercase__ : Optional[Any]= make_list_of_images(snake_case__ )
if not valid_images(snake_case__ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
# All transformations expect numpy arrays.
lowercase__ : int= [to_numpy_array(snake_case__ ) for image in images]
if do_rescale:
lowercase__ : Tuple= [self.rescale(image=snake_case__ , scale=snake_case__ ) for image in images]
if do_pad:
lowercase__ : Optional[Any]= [self.pad(snake_case__ , size=snake_case__ ) for image in images]
lowercase__ : Dict= [to_channel_dimension_format(snake_case__ , snake_case__ ) for image in images]
lowercase__ : Dict= {"pixel_values": images}
return BatchFeature(data=snake_case__ , tensor_type=snake_case__ )
| 85 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a : int = logging.get_logger(__name__)
a : str = {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""",
"""google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""",
"""google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = "big_bird"
def __init__( self , snake_case__=50358 , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__="gelu_new" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=4096 , snake_case__=2 , snake_case__=0.02 , snake_case__=1e-12 , snake_case__=True , snake_case__=0 , snake_case__=1 , snake_case__=2 , snake_case__=66 , snake_case__="block_sparse" , snake_case__=True , snake_case__=False , snake_case__=64 , snake_case__=3 , snake_case__=None , **snake_case__ , ):
'''simple docstring'''
super().__init__(
pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , sep_token_id=snake_case__ , **snake_case__ , )
lowercase__ : Dict= vocab_size
lowercase__ : Optional[int]= max_position_embeddings
lowercase__ : List[Any]= hidden_size
lowercase__ : List[str]= num_hidden_layers
lowercase__ : List[str]= num_attention_heads
lowercase__ : Optional[int]= intermediate_size
lowercase__ : Optional[int]= hidden_act
lowercase__ : Tuple= hidden_dropout_prob
lowercase__ : int= attention_probs_dropout_prob
lowercase__ : int= initializer_range
lowercase__ : List[Any]= type_vocab_size
lowercase__ : Union[str, Any]= layer_norm_eps
lowercase__ : Optional[Any]= use_cache
lowercase__ : Union[str, Any]= rescale_embeddings
lowercase__ : Union[str, Any]= attention_type
lowercase__ : Any= use_bias
lowercase__ : List[Any]= block_size
lowercase__ : Optional[Any]= num_random_blocks
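# block_size and num_random_blocks only take effect when attention_type == "block_sparse";
# they are ignored for full attention.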
lowercase__ : Optional[int]= classifier_dropout
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
@property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
if self.task == "multiple-choice":
lowercase__ : List[Any]= {0: "batch", 1: "choice", 2: "sequence"}
else:
lowercase__ : Tuple= {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 85 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
a : int = logging.get_logger(__name__)
def lowercase__(A ) ->List[int]:
"""simple docstring"""
if isinstance(A , np.ndarray ):
return list(tensor.shape )
lowercase__ : Optional[int]= tf.shape(A )
if tensor.shape == tf.TensorShape(A ):
return dynamic
lowercase__ : List[Any]= tensor.shape.as_list()
return [dynamic[i] if s is None else s for i, s in enumerate(A )]
def lowercase__(A , A = None , A = None ) ->tf.Tensor:
"""simple docstring"""
return tf.nn.softmax(logits=logits + 1e-9 , axis=A , name=A )
def lowercase__(A , A , A , A=1e-5 , A=-1 ) ->str:
"""simple docstring"""
if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(A , A ):
raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis." )
# Get mean and variance on the axis to be normalized
lowercase__, lowercase__ : str= tf.nn.moments(A , axes=[axis] , keepdims=A )
if axis != -1:
# Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
# on every dimension except axis
lowercase__ : Dict= [1] * inputs.shape.rank
lowercase__ : List[Any]= shape_list(A )[axis]
lowercase__ : Optional[Any]= tf.reshape(A , A )
lowercase__ : Dict= tf.reshape(A , A )
# Compute layer normalization using the batch_normalization
# function.
lowercase__ : Union[str, Any]= tf.nn.batch_normalization(
A , A , A , offset=A , scale=A , variance_epsilon=A , )
return outputs
def lowercase__(A , A=0 , A=-1 ) ->Any:
"""simple docstring"""
if end_dim < 0:
end_dim += input.shape.rank
if start_dim < 0:
start_dim += input.shape.rank
if start_dim == end_dim:
return input
lowercase__ : Dict= tf.shape(A )
lowercase__ : List[Any]= tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
lowercase__ : Optional[int]= tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
return tf.reshape(A , A )
def lowercase__(A ) ->tf.Tensor:
"""simple docstring"""
if not isinstance(A , tf.Tensor ):
lowercase__ : Tuple= tf.convert_to_tensor(A ) # Catches stray NumPy inputs
if encoder_attention_mask.shape.rank == 3:
lowercase__ : Dict= encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.shape.rank == 2:
lowercase__ : Optional[int]= encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
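# Turn the 0/1 keep-mask into an additive bias: kept positions become 0 and masked positions
# become dtype.min, so the result can be added directly to attention scores.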
lowercase__ : int= (
tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
) * encoder_extended_attention_mask.dtype.min
return encoder_extended_attention_mask
def lowercase__(A , A , A = "input_ids" ) ->None:
"""simple docstring"""
tf.debugging.assert_less(
A , tf.cast(A , dtype=tensor.dtype ) , message=(
f'''The maximum value of {tensor_name} ({tf.math.reduce_max(A )}) must be smaller than the embedding '''
f'''layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time.'''
) , )
def lowercase__(A , A , A ) ->Any:
"""simple docstring"""
lowercase__ : Union[str, Any]= 64_512
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
lowercase__ : Optional[int]= [x for x in data if len(A ) > HDF5_OBJECT_HEADER_LIMIT]
# Expecting this to never be true.
if bad_attributes:
raise RuntimeError(
"The following attributes cannot be saved to HDF5 file because "
f'''they are larger than {HDF5_OBJECT_HEADER_LIMIT} '''
f'''bytes: {bad_attributes}''' )
lowercase__ : Dict= np.asarray(A )
lowercase__ : Optional[Any]= 1
lowercase__ : str= np.array_split(A , A )
# This will never loop forever thanks to the test above.
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
num_chunks += 1
lowercase__ : Union[str, Any]= np.array_split(A , A )
if num_chunks > 1:
for chunk_id, chunk_data in enumerate(A ):
lowercase__ : List[Any]= chunk_data
else:
lowercase__ : List[str]= data
def lowercase__(A , A ) ->Optional[Any]:
"""simple docstring"""
if name in group.attrs:
lowercase__ : Tuple= [n.decode("utf8" ) if hasattr(A , "decode" ) else n for n in group.attrs[name]]
else:
lowercase__ : List[str]= []
lowercase__ : Dict= 0
while "%s%d" % (name, chunk_id) in group.attrs:
data.extend(
[n.decode("utf8" ) if hasattr(A , "decode" ) else n for n in group.attrs["%s%d" % (name, chunk_id)]] )
chunk_id += 1
return data
def lowercase__(A ) ->Any:
"""simple docstring"""
def _expand_single_ad_tensor(A ):
if isinstance(A , tf.Tensor ) and t.shape.rank == 1:
return tf.expand_dims(A , axis=-1 )
return t
return tf.nest.map_structure(_expand_single_ad_tensor , A )
| 85 |
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 85 | 1 |
"""simple docstring"""
def lowercase__(A ) ->list:
"""simple docstring"""
lowercase__ : List[Any]= int(A )
if n_element < 1:
lowercase__ : Any= ValueError("a should be a positive number" )
raise my_error
lowercase__ : Any= [1]
lowercase__, lowercase__, lowercase__ : Optional[int]= (0, 0, 0)
lowercase__ : List[Any]= 1
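# Three-pointer construction: i, j and k track the next candidates to multiply by 2, 3 and 5;
# each appended value is the smallest candidate strictly greater than the last one in the list.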
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
if __name__ == "__main__":
a : Dict = input("""Enter the last number (nth term) of the Hamming Number Series: """)
print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
a : str = hamming(int(n))
print("""-----------------------------------------------------""")
print(F"""The list with nth numbers is: {hamming_numbers}""")
print("""-----------------------------------------------------""")
| 85 |
"""simple docstring"""
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def lowercase__(A , A ) ->List[Any]:
"""simple docstring"""
lowercase__ : str= []
for part_id in partition_order:
lowercase__ : int= df.where(f'''SPARK_PARTITION_ID() = {part_id}''' ).collect()
for row_idx, row in enumerate(A ):
expected_row_ids_and_row_dicts.append((f'''{part_id}_{row_idx}''', row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def lowercase__() ->str:
"""simple docstring"""
lowercase__ : Optional[Any]= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : Tuple= spark.range(100 ).repartition(1 )
lowercase__ : Dict= Spark(A )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def lowercase__() ->Tuple:
"""simple docstring"""
lowercase__ : Union[str, Any]= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : Dict= spark.range(10 ).repartition(2 )
lowercase__ : Optional[Any]= [1, 0]
lowercase__ : List[str]= _generate_iterable_examples(A , A ) # Reverse the partitions.
lowercase__ : int= _get_expected_row_ids_and_row_dicts_for_partition_order(A , A )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
lowercase__, lowercase__ : Any= expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowercase__() ->int:
"""simple docstring"""
lowercase__ : int= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : Dict= spark.range(10 ).repartition(1 )
lowercase__ : str= SparkExamplesIterable(A )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(A ):
assert row_id == f'''0_{i}'''
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def lowercase__() ->str:
"""simple docstring"""
lowercase__ : List[str]= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : int= spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch("numpy.random.Generator" ) as generator_mock:
lowercase__ : Optional[Any]= lambda A : x.reverse()
lowercase__ : Tuple= _get_expected_row_ids_and_row_dicts_for_partition_order(A , [2, 1, 0] )
lowercase__ : List[str]= SparkExamplesIterable(A ).shuffle_data_sources(A )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(A ):
lowercase__, lowercase__ : str= expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowercase__() ->Any:
"""simple docstring"""
lowercase__ : Dict= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : Union[str, Any]= spark.range(20 ).repartition(4 )
# Partitions 0 and 2
lowercase__ : Optional[int]= SparkExamplesIterable(A ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
lowercase__ : Union[str, Any]= _get_expected_row_ids_and_row_dicts_for_partition_order(A , [0, 2] )
for i, (row_id, row_dict) in enumerate(A ):
lowercase__, lowercase__ : Tuple= expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
lowercase__ : Tuple= SparkExamplesIterable(A ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
lowercase__ : List[Any]= _get_expected_row_ids_and_row_dicts_for_partition_order(A , [1, 3] )
for i, (row_id, row_dict) in enumerate(A ):
lowercase__, lowercase__ : Dict= expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowercase__() ->Tuple:
"""simple docstring"""
lowercase__ : Any= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : Tuple= spark.range(100 ).repartition(1 )
lowercase__ : Optional[int]= Spark(A )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
| 85 | 1 |
"""simple docstring"""
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
a : Union[str, Any] = HfArgumentParser(InitializationArguments)
a : str = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
a : Optional[int] = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
a : Any = {
"""vocab_size""": len(tokenizer),
"""scale_attn_by_inverse_layer_idx""": True,
"""reorder_and_upcast_attn""": True,
}
# Load model config (GPT-2 large in this case)
a : str = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
a : Tuple = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
| 85 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__=13 , snake_case__=7 , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=False , snake_case__=False , snake_case__=False , snake_case__=2 , snake_case__=99 , snake_case__=0 , snake_case__=32 , snake_case__=5 , snake_case__=4 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=12 , snake_case__=2 , snake_case__=0.02 , snake_case__=3 , snake_case__=4 , snake_case__="last" , snake_case__=None , snake_case__=None , ):
'''simple docstring'''
lowercase__ : Optional[int]= parent
lowercase__ : Tuple= batch_size
lowercase__ : Tuple= seq_length
lowercase__ : str= is_training
lowercase__ : str= use_input_lengths
lowercase__ : Any= use_token_type_ids
lowercase__ : List[Any]= use_labels
lowercase__ : Optional[int]= gelu_activation
lowercase__ : str= sinusoidal_embeddings
lowercase__ : List[str]= causal
lowercase__ : Any= asm
lowercase__ : Optional[int]= n_langs
lowercase__ : Union[str, Any]= vocab_size
lowercase__ : int= n_special
lowercase__ : Any= hidden_size
lowercase__ : int= num_hidden_layers
lowercase__ : List[str]= num_attention_heads
lowercase__ : List[str]= hidden_dropout_prob
lowercase__ : str= attention_probs_dropout_prob
lowercase__ : Any= max_position_embeddings
lowercase__ : List[Any]= type_vocab_size
lowercase__ : int= type_sequence_label_size
lowercase__ : Any= initializer_range
lowercase__ : Optional[int]= num_labels
lowercase__ : Union[str, Any]= num_choices
lowercase__ : List[Any]= summary_type
lowercase__ : Optional[int]= use_proj
lowercase__ : int= scope
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : Dict= random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : Tuple= None
if self.use_input_lengths:
lowercase__ : List[Any]= (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowercase__ : Tuple= None
if self.use_token_type_ids:
lowercase__ : Any= ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
lowercase__ : str= None
lowercase__ : Tuple= None
lowercase__ : Dict= None
if self.use_labels:
lowercase__ : Optional[Any]= ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : Optional[Any]= ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__ : Tuple= ids_tensor([self.batch_size] , 2 ).float()
lowercase__ : Tuple= ids_tensor([self.batch_size] , self.num_choices )
lowercase__ : List[Any]= self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : Any= FlaubertModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : str= model(snake_case__ , lengths=snake_case__ , langs=snake_case__ )
lowercase__ : str= model(snake_case__ , langs=snake_case__ )
lowercase__ : Any= model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : str= FlaubertWithLMHeadModel(snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : Optional[Any]= model(snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : str= FlaubertForQuestionAnsweringSimple(snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : List[str]= model(snake_case__ )
lowercase__ : Dict= model(snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : List[Any]= FlaubertForQuestionAnswering(snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : Dict= model(snake_case__ )
lowercase__ : Any= model(
snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , cls_index=snake_case__ , is_impossible=snake_case__ , p_mask=snake_case__ , )
lowercase__ : List[str]= model(
snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , cls_index=snake_case__ , is_impossible=snake_case__ , )
((lowercase__), ) : Optional[Any]= result_with_labels.to_tuple()
lowercase__ : Union[str, Any]= model(snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ )
((lowercase__), ) : List[Any]= result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : List[str]= FlaubertForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : Optional[Any]= model(snake_case__ )
lowercase__ : Optional[Any]= model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : List[Any]= self.num_labels
lowercase__ : Union[str, Any]= FlaubertForTokenClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : int= model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : int= self.num_choices
lowercase__ : str= FlaubertForMultipleChoice(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : Dict= input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : int= token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : str= input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : Any= model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.prepare_config_and_inputs()
(
(
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
),
) : Any= config_and_inputs
lowercase__ : Tuple= {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"lengths": input_lengths,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
__lowerCamelCase = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
__lowerCamelCase = (
{
"feature-extraction": FlaubertModel,
"fill-mask": FlaubertWithLMHeadModel,
"question-answering": FlaubertForQuestionAnsweringSimple,
"text-classification": FlaubertForSequenceClassification,
"token-classification": FlaubertForTokenClassification,
"zero-shot": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__=False ):
'''simple docstring'''
lowercase__ : Tuple= super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
lowercase__ : List[Any]= torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case__ )
lowercase__ : List[str]= torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case__ )
return inputs_dict
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= FlaubertModelTester(self )
lowercase__ : List[str]= ConfigTester(self , config_class=snake_case__ , emb_dim=37 )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Any= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[int]= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Tuple= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[str]= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Union[str, Any]= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Tuple= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*snake_case__ )
@slow
def UpperCAmelCase_ ( self ):
'''simple docstring'''
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : List[str]= FlaubertModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
@slow
@require_torch_gpu
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__, lowercase__ : Optional[Any]= self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
lowercase__ : int= True
lowercase__ : List[Any]= model_class(config=snake_case__ )
lowercase__ : str= self._prepare_for_class(snake_case__ , snake_case__ )
lowercase__ : Dict= torch.jit.trace(
snake_case__ , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(snake_case__ , os.path.join(snake_case__ , "traced_model.pt" ) )
lowercase__ : str= torch.jit.load(os.path.join(snake_case__ , "traced_model.pt" ) , map_location=snake_case__ )
loaded(inputs_dict["input_ids"].to(snake_case__ ) , inputs_dict["attention_mask"].to(snake_case__ ) )
@require_torch
class __UpperCAmelCase( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= FlaubertModel.from_pretrained("flaubert/flaubert_base_cased" )
lowercase__ : Tuple= torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
with torch.no_grad():
lowercase__ : Optional[int]= model(snake_case__ )[0]
lowercase__ : Optional[int]= torch.Size((1, 11, 768) )
self.assertEqual(output.shape , snake_case__ )
lowercase__ : Dict= torch.tensor(
[[[-2.62_51, -1.42_98, -0.02_27], [-2.85_10, -1.63_87, 0.22_58], [-2.81_14, -1.18_32, -0.30_66]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case__ , atol=1e-4 ) )
| 85 | 1 |
"""simple docstring"""
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class __UpperCAmelCase( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[str]= logging.get_logger()
# the current default level is logging.WARNING
lowercase__ : Tuple= logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
# restore to the original level
logging.set_verbosity(snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Union[str, Any]= logging.get_verbosity()
lowercase__ : List[str]= logging.get_logger("transformers.models.bart.tokenization_bart" )
lowercase__ : List[Any]= "Testing 1, 2, 3"
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(snake_case__ ) as cl:
logger.warning(snake_case__ )
self.assertEqual(cl.out , msg + "\n" )
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
with CaptureLogger(snake_case__ ) as cl:
logger.warning(snake_case__ )
self.assertEqual(cl.out , "" )
# should be able to log warnings again
logging.set_verbosity_warning()
with CaptureLogger(snake_case__ ) as cl:
logger.warning(snake_case__ )
self.assertEqual(cl.out , msg + "\n" )
# restore to the original level
logging.set_verbosity(snake_case__ )
@mockenv(TRANSFORMERS_VERBOSITY="error" )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
lowercase__ : Dict= logging.get_logger("transformers.models.bart.tokenization_bart" )
lowercase__ : str= os.getenv("TRANSFORMERS_VERBOSITY" , snake_case__ )
lowercase__ : Union[str, Any]= logging.log_levels[env_level_str]
lowercase__ : int= logging.get_verbosity()
self.assertEqual(
snake_case__ , snake_case__ , F'''TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}''' , )
# restore to the original level
lowercase__ : Union[str, Any]= ""
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY="super-error" )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
# reset so that the env var takes effect the next time a logger call is made
transformers.utils.logging._reset_library_root_logger()
lowercase__ : str= logging.logging.getLogger()
with CaptureLogger(snake_case__ ) as cl:
# this action activates the env var
logging.get_logger("transformers.models.bart.tokenization_bart" )
self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error" , cl.out )
# no need to restore as nothing was changed
def UpperCAmelCase_ ( self ):
'''simple docstring'''
# testing `logger.warning_advice()`
transformers.utils.logging._reset_library_root_logger()
lowercase__ : Dict= logging.get_logger("transformers.models.bart.tokenization_bart" )
lowercase__ : int= "Testing 1, 2, 3"
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1" ):
# nothing should be logged as env var disables this method
with CaptureLogger(snake_case__ ) as cl:
logger.warning_advice(snake_case__ )
self.assertEqual(cl.out , "" )
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="" ):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
with CaptureLogger(snake_case__ ) as cl:
logger.warning_advice(snake_case__ )
self.assertEqual(cl.out , msg + "\n" )
def lowercase__() ->List[Any]:
"""simple docstring"""
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
| 85 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = 42
__lowerCamelCase = 42
__lowerCamelCase = None
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = 2
@register_to_config
def __init__( self , snake_case__ = 0.02 , snake_case__ = 100 , snake_case__ = 1.0_07 , snake_case__ = 80 , snake_case__ = 0.05 , snake_case__ = 50 , ):
'''simple docstring'''
# standard deviation of the initial noise distribution
lowercase__ : int= sigma_max
# settable values
lowercase__ : int= None
lowercase__ : np.IntTensor= None
lowercase__ : torch.FloatTensor= None # sigma(t_i)
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
return sample
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
lowercase__ : List[Any]= num_inference_steps
lowercase__ : Any= np.arange(0 , self.num_inference_steps )[::-1].copy()
lowercase__ : Tuple= torch.from_numpy(snake_case__ ).to(snake_case__ )
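# noise schedule: geometric interpolation from sigma_max**2 down to sigma_min**2 across the inference steps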
lowercase__ : Union[str, Any]= [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in self.timesteps
]
lowercase__ : int= torch.tensor(snake_case__ , dtype=torch.floataa , device=snake_case__ )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ = None ):
'''simple docstring'''
if self.config.s_min <= sigma <= self.config.s_max:
lowercase__ : Optional[Any]= min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
else:
lowercase__ : str= 0
# sample eps ~ N(0, S_noise^2 * I)
lowercase__ : List[Any]= self.config.s_noise * randn_tensor(sample.shape , generator=snake_case__ ).to(sample.device )
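# raise the noise level to sigma_hat = sigma * (1 + gamma) and add just enough fresh noise to the sample to match it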
lowercase__ : str= sigma + gamma * sigma
lowercase__ : Any= sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = True , ):
'''simple docstring'''
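# first-order (Euler) step: estimate the denoised sample at sigma_hat, form the derivative, and move to sigma_prev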
lowercase__ : Union[str, Any]= sample_hat + sigma_hat * model_output
lowercase__ : Optional[int]= (sample_hat - pred_original_sample) / sigma_hat
lowercase__ : Optional[Any]= sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=snake_case__ , derivative=snake_case__ , pred_original_sample=snake_case__ )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = True , ):
'''simple docstring'''
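# second-order (Heun-style) correction: re-evaluate the derivative at sigma_prev and average it with the Euler-step derivative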
lowercase__ : int= sample_prev + sigma_prev * model_output
lowercase__ : Optional[int]= (sample_prev - pred_original_sample) / sigma_prev
lowercase__ : Optional[Any]= sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=snake_case__ , derivative=snake_case__ , pred_original_sample=snake_case__ )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
raise NotImplementedError()
| 85 | 1 |
"""simple docstring"""
a : Dict = {
"""Pillow""": """Pillow""",
"""accelerate""": """accelerate>=0.11.0""",
"""compel""": """compel==0.1.8""",
"""black""": """black~=23.1""",
"""datasets""": """datasets""",
"""filelock""": """filelock""",
"""flax""": """flax>=0.4.1""",
"""hf-doc-builder""": """hf-doc-builder>=0.3.0""",
"""huggingface-hub""": """huggingface-hub>=0.13.2""",
"""requests-mock""": """requests-mock==1.10.0""",
"""importlib_metadata""": """importlib_metadata""",
"""invisible-watermark""": """invisible-watermark""",
"""isort""": """isort>=5.5.4""",
"""jax""": """jax>=0.2.8,!=0.3.2""",
"""jaxlib""": """jaxlib>=0.1.65""",
"""Jinja2""": """Jinja2""",
"""k-diffusion""": """k-diffusion>=0.0.12""",
"""torchsde""": """torchsde""",
"""note_seq""": """note_seq""",
"""librosa""": """librosa""",
"""numpy""": """numpy""",
"""omegaconf""": """omegaconf""",
"""parameterized""": """parameterized""",
"""protobuf""": """protobuf>=3.20.3,<4""",
"""pytest""": """pytest""",
"""pytest-timeout""": """pytest-timeout""",
"""pytest-xdist""": """pytest-xdist""",
"""ruff""": """ruff>=0.0.241""",
"""safetensors""": """safetensors""",
"""sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""",
"""scipy""": """scipy""",
"""onnx""": """onnx""",
"""regex""": """regex!=2019.12.17""",
"""requests""": """requests""",
"""tensorboard""": """tensorboard""",
"""torch""": """torch>=1.4""",
"""torchvision""": """torchvision""",
"""transformers""": """transformers>=4.25.1""",
"""urllib3""": """urllib3<=2.0.0""",
}
| 85 |
"""simple docstring"""
from ....utils import logging
a : List[str] = logging.get_logger(__name__)
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__=None , snake_case__=2048 ):
'''simple docstring'''
lowercase__ : Dict= config.__dict__
lowercase__ : str= modal_hidden_size
if num_labels:
lowercase__ : List[str]= num_labels
| 85 | 1 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a : List[str] = logging.get_logger(__name__)
a : Optional[int] = {
"""ut/deta""": """https://huggingface.co/ut/deta/resolve/main/config.json""",
}
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = "deta"
__lowerCamelCase = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self , snake_case__=None , snake_case__=900 , snake_case__=2048 , snake_case__=6 , snake_case__=2048 , snake_case__=8 , snake_case__=6 , snake_case__=1024 , snake_case__=8 , snake_case__=0.0 , snake_case__=True , snake_case__="relu" , snake_case__=256 , snake_case__=0.1 , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.02 , snake_case__=1.0 , snake_case__=True , snake_case__=False , snake_case__="sine" , snake_case__=5 , snake_case__=4 , snake_case__=4 , snake_case__=True , snake_case__=300 , snake_case__=True , snake_case__=True , snake_case__=1 , snake_case__=5 , snake_case__=2 , snake_case__=1 , snake_case__=1 , snake_case__=5 , snake_case__=2 , snake_case__=0.1 , snake_case__=0.25 , **snake_case__ , ):
'''simple docstring'''
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
lowercase__ : Optional[int]= CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"] )
else:
if isinstance(snake_case__ , snake_case__ ):
lowercase__ : Union[str, Any]= backbone_config.pop("model_type" )
lowercase__ : Optional[int]= CONFIG_MAPPING[backbone_model_type]
lowercase__ : Tuple= config_class.from_dict(snake_case__ )
lowercase__ : List[str]= backbone_config
lowercase__ : List[str]= num_queries
lowercase__ : Optional[int]= max_position_embeddings
lowercase__ : Union[str, Any]= d_model
lowercase__ : Tuple= encoder_ffn_dim
lowercase__ : List[Any]= encoder_layers
lowercase__ : Dict= encoder_attention_heads
lowercase__ : List[Any]= decoder_ffn_dim
lowercase__ : Any= decoder_layers
lowercase__ : Dict= decoder_attention_heads
lowercase__ : Optional[int]= dropout
lowercase__ : List[Any]= attention_dropout
lowercase__ : List[Any]= activation_dropout
lowercase__ : int= activation_function
lowercase__ : Optional[Any]= init_std
lowercase__ : str= init_xavier_std
lowercase__ : Tuple= encoder_layerdrop
lowercase__ : Union[str, Any]= auxiliary_loss
lowercase__ : Any= position_embedding_type
# deformable attributes
lowercase__ : int= num_feature_levels
lowercase__ : Union[str, Any]= encoder_n_points
lowercase__ : str= decoder_n_points
lowercase__ : str= two_stage
lowercase__ : Optional[int]= two_stage_num_proposals
lowercase__ : Any= with_box_refine
lowercase__ : int= assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError("If two_stage is True, with_box_refine must be True." )
# Hungarian matcher
lowercase__ : int= class_cost
lowercase__ : Any= bbox_cost
lowercase__ : int= giou_cost
# Loss coefficients
lowercase__ : List[Any]= mask_loss_coefficient
lowercase__ : List[str]= dice_loss_coefficient
lowercase__ : Optional[int]= bbox_loss_coefficient
lowercase__ : Any= giou_loss_coefficient
lowercase__ : Any= eos_coefficient
lowercase__ : List[str]= focal_alpha
super().__init__(is_encoder_decoder=snake_case__ , **snake_case__ )
@property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return self.encoder_attention_heads
@property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return self.d_model
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= copy.deepcopy(self.__dict__ )
lowercase__ : int= self.backbone_config.to_dict()
lowercase__ : List[str]= self.__class__.model_type
return output
| 85 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def lowercase__(A ) ->int:
"""simple docstring"""
lowercase__ : Optional[int]= []
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''',
f'''stage{idx}.patch_embed.proj.weight''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''',
f'''stage{idx}.patch_embed.proj.bias''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''',
f'''stage{idx}.patch_embed.norm.weight''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''',
f'''stage{idx}.patch_embed.norm.bias''',
) )
return embed
def lowercase__(A , A ) ->Any:
"""simple docstring"""
lowercase__ : Any= []
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj.bias''',
) )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', f'''stage{idx}.blocks.{cnt}.norm1.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', f'''stage{idx}.blocks.{cnt}.norm1.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', f'''stage{idx}.blocks.{cnt}.norm2.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', f'''stage{idx}.blocks.{cnt}.norm2.bias''') )
return attention_weights
def lowercase__(A ) ->List[Any]:
"""simple docstring"""
lowercase__ : Dict= []
token.append((f'''cvt.encoder.stages.{idx}.cls_token''', "stage2.cls_token") )
return token
def lowercase__() ->Union[str, Any]:
"""simple docstring"""
lowercase__ : Dict= []
head.append(("layernorm.weight", "norm.weight") )
head.append(("layernorm.bias", "norm.bias") )
head.append(("classifier.weight", "head.weight") )
head.append(("classifier.bias", "head.bias") )
return head
def lowercase__(A , A , A , A ) ->Optional[int]:
"""simple docstring"""
lowercase__ : List[str]= "imagenet-1k-id2label.json"
lowercase__ : List[str]= 1_000
lowercase__ : Tuple= "huggingface/label-files"
lowercase__ : int= num_labels
lowercase__ : int= json.load(open(cached_download(hf_hub_url(A , A , repo_type="dataset" ) ) , "r" ) )
lowercase__ : str= {int(A ): v for k, v in idalabel.items()}
lowercase__ : Optional[int]= idalabel
lowercase__ : Union[str, Any]= {v: k for k, v in idalabel.items()}
lowercase__ : Tuple= CvtConfig(num_labels=A , idalabel=A , labelaid=A )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit("/" , 1 )[-1][4:6] == "13":
lowercase__ : int= [1, 2, 10]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit("/" , 1 )[-1][4:6] == "21":
lowercase__ : Union[str, Any]= [1, 4, 16]
# For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
else:
lowercase__ : Optional[Any]= [2, 2, 20]
lowercase__ : Optional[Any]= [3, 12, 16]
lowercase__ : List[str]= [192, 768, 1_024]
lowercase__ : List[str]= CvtForImageClassification(A )
lowercase__ : Any= AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" )
lowercase__ : Dict= image_size
lowercase__ : int= torch.load(A , map_location=torch.device("cpu" ) )
lowercase__ : Optional[Any]= OrderedDict()
lowercase__ : Tuple= []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
lowercase__ : Optional[int]= list_of_state_dict + cls_token(A )
lowercase__ : List[str]= list_of_state_dict + embeddings(A )
for cnt in range(config.depth[idx] ):
lowercase__ : Dict= list_of_state_dict + attention(A , A )
lowercase__ : Optional[Any]= list_of_state_dict + final()
for gg in list_of_state_dict:
print(A )
for i in range(len(A ) ):
lowercase__ : str= original_weights[list_of_state_dict[i][1]]
model.load_state_dict(A )
model.save_pretrained(A )
image_processor.save_pretrained(A )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
a : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"""--cvt_model""",
default="""cvt-w24""",
type=str,
help="""Name of the cvt model you'd like to convert.""",
)
parser.add_argument(
"""--image_size""",
default=384,
type=int,
help="""Input Image Size""",
)
parser.add_argument(
"""--cvt_file_name""",
default=r"""cvtmodels\CvT-w24-384x384-IN-22k.pth""",
type=str,
help="""Input Image Size""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
a : Optional[int] = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 85 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = ["image_processor", "tokenizer"]
__lowerCamelCase = "BlipImageProcessor"
__lowerCamelCase = "AutoTokenizer"
def __init__( self , snake_case__ , snake_case__ ):
'''simple docstring'''
lowercase__ : Union[str, Any]= False
super().__init__(snake_case__ , snake_case__ )
lowercase__ : List[Any]= self.image_processor
def __call__( self , snake_case__ = None , snake_case__ = None , snake_case__ = True , snake_case__ = False , snake_case__ = None , snake_case__ = None , snake_case__ = 0 , snake_case__ = None , snake_case__ = None , snake_case__ = False , snake_case__ = False , snake_case__ = False , snake_case__ = False , snake_case__ = False , snake_case__ = True , snake_case__ = None , **snake_case__ , ):
'''simple docstring'''
if images is None and text is None:
raise ValueError("You have to specify either images or text." )
# Get only text
if images is None:
lowercase__ : List[str]= self.tokenizer
lowercase__ : Union[str, Any]= self.tokenizer(
text=snake_case__ , add_special_tokens=snake_case__ , padding=snake_case__ , truncation=snake_case__ , max_length=snake_case__ , stride=snake_case__ , pad_to_multiple_of=snake_case__ , return_attention_mask=snake_case__ , return_overflowing_tokens=snake_case__ , return_special_tokens_mask=snake_case__ , return_offsets_mapping=snake_case__ , return_token_type_ids=snake_case__ , return_length=snake_case__ , verbose=snake_case__ , return_tensors=snake_case__ , **snake_case__ , )
return text_encoding
# add pixel_values
lowercase__ : Any= self.image_processor(snake_case__ , return_tensors=snake_case__ )
if text is not None:
lowercase__ : Tuple= self.tokenizer(
text=snake_case__ , add_special_tokens=snake_case__ , padding=snake_case__ , truncation=snake_case__ , max_length=snake_case__ , stride=snake_case__ , pad_to_multiple_of=snake_case__ , return_attention_mask=snake_case__ , return_overflowing_tokens=snake_case__ , return_special_tokens_mask=snake_case__ , return_offsets_mapping=snake_case__ , return_token_type_ids=snake_case__ , return_length=snake_case__ , verbose=snake_case__ , return_tensors=snake_case__ , **snake_case__ , )
else:
lowercase__ : List[str]= None
if text_encoding is not None:
encoding_image_processor.update(snake_case__ )
return encoding_image_processor
def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ )
def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
return self.tokenizer.decode(*snake_case__ , **snake_case__ )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Any= self.tokenizer.model_input_names
lowercase__ : Optional[int]= self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 85 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = 42
__lowerCamelCase = 42
def __init__( self , snake_case__ , snake_case__ ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=snake_case__ , scheduler=snake_case__ )
@torch.no_grad()
def __call__( self , snake_case__ = 1 , snake_case__ = 2000 , snake_case__ = None , snake_case__ = "pil" , snake_case__ = True , **snake_case__ , ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.unet.config.sample_size
lowercase__ : Dict= (batch_size, 3, img_size, img_size)
lowercase__ : List[Any]= self.unet
lowercase__ : Tuple= randn_tensor(snake_case__ , generator=snake_case__ ) * self.scheduler.init_noise_sigma
lowercase__ : Tuple= sample.to(self.device )
self.scheduler.set_timesteps(snake_case__ )
self.scheduler.set_sigmas(snake_case__ )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
lowercase__ : Optional[Any]= self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
lowercase__ : List[Any]= self.unet(snake_case__ , snake_case__ ).sample
lowercase__ : List[Any]= self.scheduler.step_correct(snake_case__ , snake_case__ , generator=snake_case__ ).prev_sample
# prediction step
lowercase__ : List[str]= model(snake_case__ , snake_case__ ).sample
lowercase__ : Tuple= self.scheduler.step_pred(snake_case__ , snake_case__ , snake_case__ , generator=snake_case__ )
lowercase__, lowercase__ : Tuple= output.prev_sample, output.prev_sample_mean
lowercase__ : List[str]= sample_mean.clamp(0 , 1 )
lowercase__ : Union[str, Any]= sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowercase__ : str= self.numpy_to_pil(snake_case__ )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=snake_case__ )
| 85 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a : Optional[int] = logging.get_logger(__name__)
a : List[str] = {
"""facebook/levit-128S""": """https://huggingface.co/facebook/levit-128S/resolve/main/config.json""",
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = "levit"
def __init__( self , snake_case__=224 , snake_case__=3 , snake_case__=3 , snake_case__=2 , snake_case__=1 , snake_case__=16 , snake_case__=[128, 256, 384] , snake_case__=[4, 8, 12] , snake_case__=[4, 4, 4] , snake_case__=[16, 16, 16] , snake_case__=0 , snake_case__=[2, 2, 2] , snake_case__=[2, 2, 2] , snake_case__=0.02 , **snake_case__ , ):
'''simple docstring'''
super().__init__(**snake_case__ )
lowercase__ : Optional[int]= image_size
lowercase__ : Any= num_channels
lowercase__ : int= kernel_size
lowercase__ : int= stride
lowercase__ : int= padding
lowercase__ : str= hidden_sizes
lowercase__ : str= num_attention_heads
lowercase__ : int= depths
lowercase__ : Dict= key_dim
lowercase__ : int= drop_path_rate
lowercase__ : Dict= patch_size
lowercase__ : List[str]= attention_ratio
lowercase__ : Optional[int]= mlp_ratio
lowercase__ : str= initializer_range
lowercase__ : int= [
["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = version.parse("1.11" )
@property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return 1e-4
| 85 |
"""simple docstring"""
def lowercase__(A ) ->list[int]:
"""simple docstring"""
lowercase__ : List[str]= len(A )
for i in range(A ):
for j in range(i + 1 , A ):
if numbers[j] < numbers[i]:
lowercase__, lowercase__ : List[str]= numbers[j], numbers[i]
return numbers
if __name__ == "__main__":
a : Dict = input("""Enter numbers separated by a comma:\n""").strip()
a : List[str] = [int(item) for item in user_input.split(""",""")]
print(exchange_sort(unsorted))
| 85 | 1 |
"""simple docstring"""
def lowercase__(A ) ->int:
"""simple docstring"""
lowercase__ : Optional[int]= [1]
lowercase__, lowercase__, lowercase__ : List[Any]= 0, 0, 0
lowercase__ : Dict= ugly_nums[ia] * 2
lowercase__ : Optional[int]= ugly_nums[ia] * 3
lowercase__ : str= ugly_nums[ia] * 5
for _ in range(1 , A ):
lowercase__ : Tuple= min(A , A , A )
ugly_nums.append(A )
if next_num == next_a:
ia += 1
lowercase__ : Any= ugly_nums[ia] * 2
if next_num == next_a:
ia += 1
lowercase__ : Dict= ugly_nums[ia] * 3
if next_num == next_a:
ia += 1
lowercase__ : List[str]= ugly_nums[ia] * 5
return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(F"""{ugly_numbers(200) = }""")
| 85 |
"""simple docstring"""
import math
from collections.abc import Iterator
from itertools import takewhile
def lowercase__(A ) ->bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All prime numbers greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(A ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def lowercase__() ->Iterator[int]:
"""simple docstring"""
lowercase__ : Union[str, Any]= 2
while True:
if is_prime(A ):
yield num
num += 1
def lowercase__(A = 2_000_000 ) ->int:
"""simple docstring"""
return sum(takewhile(lambda x : x < A , prime_generator() ) )
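# The generator-based solution above re-tests every integer by trial division, which is
# slow for a bound of 2_000_000. A sieve of Eratosthenes is the usual faster approach;
# the sketch below assumes that technique, and the helper name `sieve_prime_sum` is
# illustrative only (it is not used elsewhere in this file).
def sieve_prime_sum(limit: int = 2_000_000) -> int:
    """Sum all primes strictly below `limit` with a boolean sieve."""
    if limit < 3:
        return 0
    is_prime = [True] * limit
    is_prime[0] = is_prime[1] = False
    for p in range(2, int(limit**0.5) + 1):
        if is_prime[p]:
            # every smaller multiple of p was already crossed out by a smaller prime
            for multiple in range(p * p, limit, p):
                is_prime[multiple] = False
    return sum(i for i, flag in enumerate(is_prime) if flag)
# Sanity check: sieve_prime_sum(10) == 17 (2 + 3 + 5 + 7).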
if __name__ == "__main__":
print(F"""{solution() = }""")
| 85 | 1 |
"""simple docstring"""
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__=None , snake_case__=True , snake_case__=None , **snake_case__ ):
'''simple docstring'''
lowercase__ : Optional[int]= parent
lowercase__ : int= config_class
lowercase__ : str= has_text_modality
lowercase__ : List[Any]= kwargs
lowercase__ : Union[str, Any]= common_properties
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Any= self.config_class(**self.inputs_dict )
lowercase__ : Any= (
["hidden_size", "num_attention_heads", "num_hidden_layers"]
if self.common_properties is None
else self.common_properties
)
# Add common fields for text models
if self.has_text_modality:
common_properties.extend(["vocab_size"] )
# Test that config has the common properties as getters
for prop in common_properties:
self.parent.assertTrue(hasattr(snake_case__ , snake_case__ ) , msg=F'''`{prop}` does not exist''' )
# Test that config has the common properties as setter
for idx, name in enumerate(snake_case__ ):
try:
setattr(snake_case__ , snake_case__ , snake_case__ )
self.parent.assertEqual(
getattr(snake_case__ , snake_case__ ) , snake_case__ , msg=F'''`{name} value {idx} expected, but was {getattr(snake_case__ , snake_case__ )}''' )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
# Test if config class can be called with Config(prop_name=..)
for idx, name in enumerate(snake_case__ ):
try:
lowercase__ : Tuple= self.config_class(**{name: idx} )
self.parent.assertEqual(
getattr(snake_case__ , snake_case__ ) , snake_case__ , msg=F'''`{name} value {idx} expected, but was {getattr(snake_case__ , snake_case__ )}''' )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= self.config_class(**self.inputs_dict )
lowercase__ : str= json.loads(config.to_json_string() )
for key, value in self.inputs_dict.items():
self.parent.assertEqual(obj[key] , snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Union[str, Any]= self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__ : List[Any]= os.path.join(snake_case__ , "config.json" )
config_first.to_json_file(snake_case__ )
lowercase__ : Any= self.config_class.from_json_file(snake_case__ )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[Any]= self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
config_first.save_pretrained(snake_case__ )
lowercase__ : Tuple= self.config_class.from_pretrained(snake_case__ )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : int= self.config_class(**self.inputs_dict )
lowercase__ : List[str]= "test"
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__ : int= os.path.join(snake_case__ , snake_case__ )
config_first.save_pretrained(snake_case__ )
lowercase__ : List[str]= self.config_class.from_pretrained(snake_case__ , subfolder=snake_case__ )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[Any]= self.config_class(**self.inputs_dict , num_labels=5 )
self.parent.assertEqual(len(config.idalabel ) , 5 )
self.parent.assertEqual(len(config.labelaid ) , 5 )
lowercase__ : Dict= 3
self.parent.assertEqual(len(config.idalabel ) , 3 )
self.parent.assertEqual(len(config.labelaid ) , 3 )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
if self.config_class.is_composition:
return
lowercase__ : Optional[int]= self.config_class()
self.parent.assertIsNotNone(snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[int]= copy.deepcopy(snake_case__ )
lowercase__ : Union[str, Any]= self.config_class(**snake_case__ )
lowercase__ : str= []
for key, value in config_common_kwargs.items():
if key == "torch_dtype":
if not is_torch_available():
continue
else:
import torch
if config.torch_dtype != torch.floataa:
wrong_values.append(("torch_dtype", config.torch_dtype, torch.floataa) )
elif getattr(snake_case__ , snake_case__ ) != value:
wrong_values.append((key, getattr(snake_case__ , snake_case__ ), value) )
if len(snake_case__ ) > 0:
lowercase__ : str= "\n".join([F'''- {v[0]}: got {v[1]} instead of {v[2]}''' for v in wrong_values] )
raise ValueError(F'''The following keys were not properly set in the config:\n{errors}''' )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
| 85 |
"""simple docstring"""
def lowercase__(A ) ->bool:
"""simple docstring"""
lowercase__ : Tuple= (1 + 24 * n) ** 0.5
return ((1 + root) / 6) % 1 == 0
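# The check above inverts P_k = k * (3 * k - 1) / 2: solving for k gives
# k = (1 + sqrt(1 + 24 * n)) / 6, so n is pentagonal exactly when k is a whole number.
# For n = 22 (the 4th pentagonal number): 1 + 24 * 22 = 529, sqrt(529) = 23 and
# (1 + 23) / 6 = 4, so the test passes.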
def lowercase__(A = 5_000 ) ->int:
"""simple docstring"""
lowercase__ : str= [(i * (3 * i - 1)) // 2 for i in range(1 , A )]
for i, pentagonal_i in enumerate(A ):
for j in range(A , len(A ) ):
lowercase__ : List[Any]= pentagonal_nums[j]
lowercase__ : int= pentagonal_i + pentagonal_j
lowercase__ : Optional[int]= pentagonal_j - pentagonal_i
if is_pentagonal(A ) and is_pentagonal(A ):
return b
return -1
if __name__ == "__main__":
print(F"""{solution() = }""")
| 85 | 1 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def lowercase__(A ) ->int:
"""simple docstring"""
lowercase__ : Optional[int]= []
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''',
f'''stage{idx}.patch_embed.proj.weight''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''',
f'''stage{idx}.patch_embed.proj.bias''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''',
f'''stage{idx}.patch_embed.norm.weight''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''',
f'''stage{idx}.patch_embed.norm.bias''',
) )
return embed
def lowercase__(A , A ) ->Any:
"""simple docstring"""
lowercase__ : Any= []
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj.bias''',
) )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', f'''stage{idx}.blocks.{cnt}.norm1.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', f'''stage{idx}.blocks.{cnt}.norm1.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', f'''stage{idx}.blocks.{cnt}.norm2.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', f'''stage{idx}.blocks.{cnt}.norm2.bias''') )
return attention_weights
def lowercase__(A ) ->List[Any]:
"""simple docstring"""
lowercase__ : Dict= []
token.append((f'''cvt.encoder.stages.{idx}.cls_token''', "stage2.cls_token") )
return token
def lowercase__() ->Union[str, Any]:
"""simple docstring"""
lowercase__ : Dict= []
head.append(("layernorm.weight", "norm.weight") )
head.append(("layernorm.bias", "norm.bias") )
head.append(("classifier.weight", "head.weight") )
head.append(("classifier.bias", "head.bias") )
return head
def lowercase__(A , A , A , A ) ->Optional[int]:
"""simple docstring"""
lowercase__ : List[str]= "imagenet-1k-id2label.json"
lowercase__ : List[str]= 1_000
lowercase__ : Tuple= "huggingface/label-files"
lowercase__ : int= num_labels
lowercase__ : int= json.load(open(cached_download(hf_hub_url(A , A , repo_type="dataset" ) ) , "r" ) )
lowercase__ : str= {int(A ): v for k, v in idalabel.items()}
lowercase__ : Optional[int]= idalabel
lowercase__ : Union[str, Any]= {v: k for k, v in idalabel.items()}
lowercase__ : Tuple= CvtConfig(num_labels=A , idalabel=A , labelaid=A )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit("/" , 1 )[-1][4:6] == "13":
lowercase__ : int= [1, 2, 10]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit("/" , 1 )[-1][4:6] == "21":
lowercase__ : Union[str, Any]= [1, 4, 16]
# For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
else:
lowercase__ : Optional[Any]= [2, 2, 20]
lowercase__ : Optional[Any]= [3, 12, 16]
lowercase__ : List[str]= [192, 768, 1_024]
lowercase__ : List[str]= CvtForImageClassification(A )
lowercase__ : Any= AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" )
lowercase__ : Dict= image_size
lowercase__ : int= torch.load(A , map_location=torch.device("cpu" ) )
lowercase__ : Optional[Any]= OrderedDict()
lowercase__ : Tuple= []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
lowercase__ : Optional[int]= list_of_state_dict + cls_token(A )
lowercase__ : List[str]= list_of_state_dict + embeddings(A )
for cnt in range(config.depth[idx] ):
lowercase__ : Dict= list_of_state_dict + attention(A , A )
lowercase__ : Optional[Any]= list_of_state_dict + final()
for gg in list_of_state_dict:
print(A )
for i in range(len(A ) ):
lowercase__ : str= original_weights[list_of_state_dict[i][1]]
model.load_state_dict(A )
model.save_pretrained(A )
image_processor.save_pretrained(A )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
a : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"""--cvt_model""",
default="""cvt-w24""",
type=str,
help="""Name of the cvt model you'd like to convert.""",
)
parser.add_argument(
"""--image_size""",
default=384,
type=int,
help="""Input Image Size""",
)
parser.add_argument(
"""--cvt_file_name""",
default=r"""cvtmodels\CvT-w24-384x384-IN-22k.pth""",
type=str,
help="""Input Image Size""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
a : Optional[int] = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 85 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : List[str] = logging.get_logger(__name__)
a : Union[str, Any] = {
"""google/pix2struct-textcaps-base""": (
"""https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"""
),
}
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = "pix2struct_text_model"
__lowerCamelCase = ["past_key_values"]
__lowerCamelCase = {
"hidden_size": "hidden_size",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , snake_case__=50244 , snake_case__=768 , snake_case__=64 , snake_case__=2048 , snake_case__=12 , snake_case__=12 , snake_case__=32 , snake_case__=128 , snake_case__=0.1 , snake_case__=1e-6 , snake_case__=1.0 , snake_case__="gelu_new" , snake_case__=0 , snake_case__=False , snake_case__=0 , snake_case__=1 , snake_case__=False , snake_case__=True , **snake_case__ , ):
'''simple docstring'''
lowercase__ : int= vocab_size
lowercase__ : Optional[Any]= hidden_size
lowercase__ : Tuple= d_kv
lowercase__ : Optional[int]= d_ff
lowercase__ : Any= num_layers
lowercase__ : Dict= num_heads
lowercase__ : List[Any]= relative_attention_num_buckets
lowercase__ : Optional[Any]= relative_attention_max_distance
lowercase__ : Dict= dropout_rate
lowercase__ : Tuple= layer_norm_epsilon
lowercase__ : str= initializer_factor
lowercase__ : Any= use_cache
lowercase__ : Optional[int]= eos_token_id
lowercase__ : str= decoder_start_token_id
# for backwards compatibility
lowercase__ : Optional[Any]= dense_act_fn
super().__init__(
pad_token_id=snake_case__ , eos_token_id=snake_case__ , decoder_start_token_id=snake_case__ , tie_word_embeddings=snake_case__ , is_decoder=snake_case__ , **snake_case__ , )
@classmethod
def UpperCAmelCase_ ( cls , snake_case__ , **snake_case__ ):
'''simple docstring'''
cls._set_token_in_kwargs(snake_case__ )
lowercase__, lowercase__ : str= cls.get_config_dict(snake_case__ , **snake_case__ )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get("model_type" ) == "pix2struct":
lowercase__ : str= config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(snake_case__ , **snake_case__ )
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = "pix2struct_vision_model"
def __init__( self , snake_case__=768 , snake_case__=768 , snake_case__=2048 , snake_case__=64 , snake_case__=12 , snake_case__=12 , snake_case__="gelu_new" , snake_case__=1e-6 , snake_case__=0.0 , snake_case__=0.0 , snake_case__=1e-10 , snake_case__=1.0 , snake_case__=4096 , snake_case__=32 , snake_case__=128 , **snake_case__ , ):
'''simple docstring'''
super().__init__(**snake_case__ )
lowercase__ : Tuple= hidden_size
lowercase__ : Tuple= patch_embed_hidden_size
lowercase__ : Optional[Any]= d_ff
lowercase__ : Dict= dropout_rate
lowercase__ : Any= num_hidden_layers
lowercase__ : Optional[int]= num_attention_heads
lowercase__ : Dict= initializer_range
lowercase__ : Tuple= initializer_factor
lowercase__ : Tuple= attention_dropout
lowercase__ : Optional[Any]= layer_norm_eps
lowercase__ : List[Any]= dense_act_fn
lowercase__ : str= seq_len
lowercase__ : List[str]= relative_attention_num_buckets
lowercase__ : Union[str, Any]= relative_attention_max_distance
lowercase__ : Dict= d_kv
@classmethod
def UpperCAmelCase_ ( cls , snake_case__ , **snake_case__ ):
'''simple docstring'''
cls._set_token_in_kwargs(snake_case__ )
lowercase__, lowercase__ : int= cls.get_config_dict(snake_case__ , **snake_case__ )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get("model_type" ) == "pix2struct":
lowercase__ : Union[str, Any]= config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(snake_case__ , **snake_case__ )
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = "pix2struct"
__lowerCamelCase = True
def __init__( self , snake_case__=None , snake_case__=None , snake_case__=1.0 , snake_case__=0.02 , snake_case__=False , snake_case__=False , snake_case__=True , **snake_case__ , ):
'''simple docstring'''
super().__init__(tie_word_embeddings=snake_case__ , is_encoder_decoder=snake_case__ , **snake_case__ )
if text_config is None:
lowercase__ : List[Any]= {}
logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values." )
if vision_config is None:
lowercase__ : str= {}
logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values." )
lowercase__ : str= PixaStructTextConfig(**snake_case__ )
lowercase__ : Dict= PixaStructVisionConfig(**snake_case__ )
lowercase__ : int= self.text_config.decoder_start_token_id
lowercase__ : List[Any]= self.text_config.pad_token_id
lowercase__ : Any= self.text_config.eos_token_id
lowercase__ : Any= initializer_factor
lowercase__ : int= initializer_range
lowercase__ : List[str]= self.initializer_range
lowercase__ : List[str]= self.initializer_range
lowercase__ : Dict= is_vqa
@classmethod
def UpperCAmelCase_ ( cls , snake_case__ , snake_case__ , **snake_case__ ):
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Union[str, Any]= copy.deepcopy(self.__dict__ )
lowercase__ : str= self.text_config.to_dict()
lowercase__ : str= self.vision_config.to_dict()
lowercase__ : List[str]= self.__class__.model_type
return output
| 85 | 1 |
"""simple docstring"""
from __future__ import annotations
import requests
def lowercase__(A ) ->dict:
"""simple docstring"""
lowercase__ : Optional[Any]= f'''https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'''
return requests.get(A ).json()
def lowercase__(A = 10 ) ->list[dict]:
"""simple docstring"""
lowercase__ : Union[str, Any]= "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
lowercase__ : Optional[Any]= requests.get(A ).json()[:max_stories]
return [get_hackernews_story(A ) for story_id in story_ids]
def lowercase__(A = 10 ) ->str:
"""simple docstring"""
lowercase__ : Optional[Any]= hackernews_top_stories(A )
return "\n".join("* [{title}]({url})".format(**A ) for story in stories )
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 85 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class __UpperCAmelCase( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : str= TFAutoModelForSeqaSeqLM.from_pretrained("google/mt5-small" )
lowercase__ : str= AutoTokenizer.from_pretrained("google/mt5-small" )
lowercase__ : Tuple= tokenizer("Hello there" , return_tensors="tf" ).input_ids
lowercase__ : Optional[Any]= tokenizer("Hi I am" , return_tensors="tf" ).input_ids
lowercase__ : Optional[Any]= model(snake_case__ , labels=snake_case__ ).loss
lowercase__ : int= -tf.math.reduce_mean(snake_case__ ).numpy()
lowercase__ : int= -21.22_81_68
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2e-4 )
| 85 | 1 |
"""simple docstring"""
from __future__ import annotations
def lowercase__(A , A ) ->bool:
"""simple docstring"""
lowercase__ : Union[str, Any]= get_failure_array(A )
# 2) Step through text searching for pattern
lowercase__, lowercase__ : Union[str, Any]= 0, 0 # index into text, pattern
while i < len(A ):
if pattern[j] == text[i]:
if j == (len(A ) - 1):
return True
j += 1
# if this is a prefix in our pattern
# just go back far enough to continue
elif j > 0:
lowercase__ : str= failure[j - 1]
continue
i += 1
return False
def lowercase__(A ) ->list[int]:
"""simple docstring"""
lowercase__ : Tuple= [0]
lowercase__ : List[str]= 0
lowercase__ : Optional[int]= 1
while j < len(A ):
if pattern[i] == pattern[j]:
i += 1
elif i > 0:
lowercase__ : Dict= failure[i - 1]
continue
j += 1
failure.append(A )
return failure
if __name__ == "__main__":
# Test 1)
a : Any = """abc1abc12"""
a : int = """alskfjaldsabc1abc1abc12k23adsfabcabc"""
a : Dict = """alskfjaldsk23adsfabcabc"""
assert kmp(pattern, texta) and not kmp(pattern, texta)
# Test 2)
a : str = """ABABX"""
a : List[str] = """ABABZABABYABABX"""
assert kmp(pattern, text)
# Test 3)
a : Dict = """AAAB"""
a : int = """ABAAAAAB"""
assert kmp(pattern, text)
# Test 4)
a : Optional[int] = """abcdabcy"""
a : str = """abcxabcdabxabcdabcdabcy"""
assert kmp(pattern, text)
# Test 5)
a : Tuple = """aabaabaaa"""
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 85 |
"""simple docstring"""
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = ["image_processor", "tokenizer"]
__lowerCamelCase = "BridgeTowerImageProcessor"
__lowerCamelCase = ("RobertaTokenizer", "RobertaTokenizerFast")
def __init__( self , snake_case__ , snake_case__ ):
'''simple docstring'''
super().__init__(snake_case__ , snake_case__ )
def __call__( self , snake_case__ , snake_case__ = None , snake_case__ = True , snake_case__ = False , snake_case__ = None , snake_case__ = None , snake_case__ = 0 , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = False , snake_case__ = False , snake_case__ = False , snake_case__ = False , snake_case__ = True , snake_case__ = None , **snake_case__ , ):
'''simple docstring'''
lowercase__ : Optional[int]= self.tokenizer(
text=snake_case__ , add_special_tokens=snake_case__ , padding=snake_case__ , truncation=snake_case__ , max_length=snake_case__ , stride=snake_case__ , pad_to_multiple_of=snake_case__ , return_token_type_ids=snake_case__ , return_attention_mask=snake_case__ , return_overflowing_tokens=snake_case__ , return_special_tokens_mask=snake_case__ , return_offsets_mapping=snake_case__ , return_length=snake_case__ , verbose=snake_case__ , return_tensors=snake_case__ , **snake_case__ , )
# add pixel_values + pixel_mask
lowercase__ : Optional[int]= self.image_processor(
snake_case__ , return_tensors=snake_case__ , do_normalize=snake_case__ , do_center_crop=snake_case__ , **snake_case__ )
encoding.update(snake_case__ )
return encoding
def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ )
def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
return self.tokenizer.decode(*snake_case__ , **snake_case__ )
@property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.tokenizer.model_input_names
lowercase__ : List[Any]= self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 85 | 1 |
"""simple docstring"""
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
@register_to_config
def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = False , ):
'''simple docstring'''
super().__init__()
lowercase__ : List[str]= nn.Embedding(snake_case__ , snake_case__ )
lowercase__ : str= nn.Embedding(snake_case__ , snake_case__ )
lowercase__ : Tuple= False
lowercase__ : Union[str, Any]= nn.Dropout(p=snake_case__ )
lowercase__ : Optional[int]= TaConfig(
vocab_size=snake_case__ , d_model=snake_case__ , num_heads=snake_case__ , d_kv=snake_case__ , d_ff=snake_case__ , dropout_rate=snake_case__ , feed_forward_proj=snake_case__ , is_decoder=snake_case__ , is_encoder_decoder=snake_case__ , )
lowercase__ : Union[str, Any]= nn.ModuleList()
for lyr_num in range(snake_case__ ):
lowercase__ : Tuple= TaBlock(snake_case__ )
self.encoders.append(snake_case__ )
lowercase__ : List[Any]= TaLayerNorm(snake_case__ )
lowercase__ : Optional[int]= nn.Dropout(p=snake_case__ )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ ):
'''simple docstring'''
lowercase__ : Any= self.token_embedder(snake_case__ )
lowercase__ : Any= encoder_input_tokens.shape[1]
lowercase__ : Tuple= torch.arange(snake_case__ , device=encoder_input_tokens.device )
x += self.position_encoding(snake_case__ )
lowercase__ : Dict= self.dropout_pre(snake_case__ )
# inverted the attention mask
lowercase__ : Union[str, Any]= encoder_input_tokens.size()
lowercase__ : Dict= self.get_extended_attention_mask(snake_case__ , snake_case__ )
for lyr in self.encoders:
lowercase__ : int= lyr(snake_case__ , snake_case__ )[0]
lowercase__ : Tuple= self.layer_norm(snake_case__ )
return self.dropout_post(snake_case__ ), encoder_inputs_mask
| 85 |
"""simple docstring"""
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= tempfile.mkdtemp()
lowercase__ : Optional[Any]= 8
# DPR tok
lowercase__ : Tuple= [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
lowercase__ : Any= os.path.join(self.tmpdirname , "dpr_tokenizer" )
os.makedirs(snake_case__ , exist_ok=snake_case__ )
lowercase__ : Any= os.path.join(snake_case__ , DPR_VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
# BART tok
lowercase__ : List[Any]= [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
lowercase__ : Tuple= dict(zip(snake_case__ , range(len(snake_case__ ) ) ) )
lowercase__ : Any= ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
lowercase__ : Tuple= {"unk_token": "<unk>"}
lowercase__ : int= os.path.join(self.tmpdirname , "bart_tokenizer" )
os.makedirs(snake_case__ , exist_ok=snake_case__ )
lowercase__ : List[str]= os.path.join(snake_case__ , BART_VOCAB_FILES_NAMES["vocab_file"] )
lowercase__ : str= os.path.join(snake_case__ , BART_VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(snake_case__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(snake_case__ ) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , "bart_tokenizer" ) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= Dataset.from_dict(
{
"id": ["0", "1"],
"text": ["foo", "bar"],
"title": ["Foo", "Bar"],
"embeddings": [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Union[str, Any]= self.get_dummy_dataset()
lowercase__ : Optional[Any]= RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset:
lowercase__ : Tuple= dataset
lowercase__ : Optional[int]= RagRetriever(
snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
lowercase__ : Dict= self.get_dummy_dataset()
lowercase__ : Tuple= RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="custom" , )
if from_disk:
lowercase__ : Tuple= os.path.join(self.tmpdirname , "dataset" )
lowercase__ : Optional[Any]= os.path.join(self.tmpdirname , "index.faiss" )
dataset.get_index("embeddings" ).save(os.path.join(self.tmpdirname , "index.faiss" ) )
dataset.drop_index("embeddings" )
dataset.save_to_disk(os.path.join(self.tmpdirname , "dataset" ) )
del dataset
lowercase__ : List[Any]= RagRetriever(
snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
lowercase__ : Optional[int]= RagRetriever(
snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , snake_case__ ) , )
return retriever
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[str]= Dataset.from_dict(
{
"id": ["0", "1"],
"text": ["foo", "bar"],
"title": ["Foo", "Bar"],
"embeddings": [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT )
lowercase__ : Optional[int]= os.path.join(self.tmpdirname , "hf_bert_base.hnswSQ8_correct_phi_128.c_index" )
dataset.save_faiss_index("embeddings" , index_file_name + ".index.dpr" )
pickle.dump(dataset["id"] , open(index_file_name + ".index_meta.dpr" , "wb" ) )
lowercase__ : int= os.path.join(self.tmpdirname , "psgs_w100.tsv.pkl" )
lowercase__ : str= {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
pickle.dump(snake_case__ , open(snake_case__ , "wb" ) )
lowercase__ : List[Any]= RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="legacy" , index_path=self.tmpdirname , )
lowercase__ : Optional[Any]= RagRetriever(
snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= 1
lowercase__ : Optional[Any]= self.get_dummy_canonical_hf_index_retriever()
lowercase__ : Union[str, Any]= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__, lowercase__, lowercase__ : Optional[int]= retriever.retrieve(snake_case__ , n_docs=snake_case__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(snake_case__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , snake_case__ )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset:
lowercase__ : Tuple= self.get_dummy_dataset()
retriever.save_pretrained(snake_case__ )
lowercase__ : int= RagRetriever.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
lowercase__ : Any= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : Tuple= retriever.retrieve(snake_case__ , n_docs=1 )
self.assertTrue(out is not None )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[Any]= 1
lowercase__ : Any= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ )
lowercase__ : Union[str, Any]= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__, lowercase__, lowercase__ : Any= retriever.retrieve(snake_case__ , n_docs=snake_case__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(snake_case__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , snake_case__ )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(snake_case__ )
lowercase__ : int= RagRetriever.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
lowercase__ : Tuple= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : str= retriever.retrieve(snake_case__ , n_docs=1 )
self.assertTrue(out is not None )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Any= 1
lowercase__ : str= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ )
lowercase__ : List[str]= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__, lowercase__, lowercase__ : Optional[int]= retriever.retrieve(snake_case__ , n_docs=snake_case__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(snake_case__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , snake_case__ )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Tuple= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(snake_case__ )
lowercase__ : Optional[Any]= RagRetriever.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
lowercase__ : int= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : Union[str, Any]= retriever.retrieve(snake_case__ , n_docs=1 )
self.assertTrue(out is not None )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= 1
lowercase__ : int= self.get_dummy_legacy_index_retriever()
lowercase__ : Optional[Any]= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__, lowercase__, lowercase__ : Optional[Any]= retriever.retrieve(snake_case__ , n_docs=snake_case__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(snake_case__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["text", "title"] )
self.assertEqual(len(doc_dicts[0]["text"] ) , snake_case__ )
self.assertEqual(doc_dicts[0]["text"][0] , "bar" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["text"][0] , "foo" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[int]= self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(snake_case__ )
lowercase__ : List[Any]= RagRetriever.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
lowercase__ : str= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : Tuple= retriever.retrieve(snake_case__ , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def UpperCAmelCase_ ( self ):
'''simple docstring'''
import torch
lowercase__ : str= 1
lowercase__ : Union[str, Any]= self.get_dummy_canonical_hf_index_retriever()
lowercase__ : str= [[5, 7], [10, 11]]
lowercase__ : List[str]= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : Dict= retriever(snake_case__ , snake_case__ , prefix=retriever.config.generator.prefix , n_docs=snake_case__ )
lowercase__, lowercase__, lowercase__ : Optional[int]= (
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(snake_case__ , snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
self.assertIsInstance(snake_case__ , np.ndarray )
lowercase__ : Any= retriever(
snake_case__ , snake_case__ , prefix=retriever.config.generator.prefix , n_docs=snake_case__ , return_tensors="pt" , )
lowercase__, lowercase__, lowercase__, lowercase__ : Tuple= ( # noqa: F841
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
out["doc_ids"],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(snake_case__ , torch.Tensor )
self.assertIsInstance(snake_case__ , torch.Tensor )
self.assertIsInstance(snake_case__ , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[str]= self.get_dpr_ctx_encoder_tokenizer()
lowercase__ : Dict= 1
lowercase__ : Any= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ )
retriever.set_ctx_encoder_tokenizer(snake_case__ )
lowercase__ : List[str]= [[5, 7], [10, 11]]
lowercase__ : Any= np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : List[Any]= retriever(snake_case__ , snake_case__ , prefix=retriever.config.generator.prefix , n_docs=snake_case__ )
self.assertEqual(
len(snake_case__ ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask") ) , snake_case__ ) # check for doc token related keys in dictionary.
| 85 | 1 |
"""simple docstring"""
def lowercase__(A = 10 , A = 1_000 , A = True ) ->int:
"""simple docstring"""
assert (
isinstance(A , A )
and isinstance(A , A )
and isinstance(A , A )
), "Invalid type of value(s) specified to function!"
if min_val > max_val:
raise ValueError("Invalid value for min_val or max_val (min_value < max_value)" )
return min_val if option else max_val
def lowercase__(A , A ) ->int:
"""simple docstring"""
return int((number_a + number_a) / 2 )
def lowercase__(A , A , A ) ->None:
"""simple docstring"""
assert (
isinstance(A , A ) and isinstance(A , A ) and isinstance(A , A )
), 'argument values must be type of "int"'
if lower > higher:
raise ValueError("argument value for lower and higher must be(lower > higher)" )
if not lower < to_guess < higher:
raise ValueError(
"guess value must be within the range of lower and higher value" )
def answer(A ) -> str:
if number > to_guess:
return "high"
elif number < to_guess:
return "low"
else:
return "same"
print("started..." )
lowercase__ : List[Any]= lower
lowercase__ : int= higher
lowercase__ : Optional[int]= []
while True:
lowercase__ : Dict= get_avg(A , A )
last_numbers.append(A )
if answer(A ) == "low":
lowercase__ : str= number
elif answer(A ) == "high":
lowercase__ : Any= number
else:
break
print(f'''guess the number : {last_numbers[-1]}''' )
print(f'''details : {last_numbers!s}''' )
def lowercase__() ->None:
"""simple docstring"""
lowercase__ : Union[str, Any]= int(input("Enter lower value : " ).strip() )
lowercase__ : Any= int(input("Enter high value : " ).strip() )
lowercase__ : str= int(input("Enter value to guess : " ).strip() )
guess_the_number(A , A , A )
if __name__ == "__main__":
main()
| 85 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = ["image_processor", "tokenizer"]
__lowerCamelCase = "AutoImageProcessor"
__lowerCamelCase = "AutoTokenizer"
def __init__( self , snake_case__ , snake_case__ ):
'''simple docstring'''
super().__init__(snake_case__ , snake_case__ )
lowercase__ : List[Any]= self.image_processor
def __call__( self , snake_case__=None , snake_case__=None , snake_case__=None , **snake_case__ ):
'''simple docstring'''
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
lowercase__ : Tuple= self.tokenizer(snake_case__ , return_tensors=snake_case__ , **snake_case__ )
if images is not None:
lowercase__ : str= self.image_processor(snake_case__ , return_tensors=snake_case__ , **snake_case__ )
if text is not None and images is not None:
lowercase__ : Any= image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**snake_case__ ) , tensor_type=snake_case__ )
def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ )
def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
return self.tokenizer.decode(*snake_case__ , **snake_case__ )
@property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return ["input_ids", "attention_mask", "pixel_values"]
| 85 | 1 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__=13 , snake_case__=7 , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=False , snake_case__=False , snake_case__=False , snake_case__=2 , snake_case__=99 , snake_case__=0 , snake_case__=32 , snake_case__=5 , snake_case__=4 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=12 , snake_case__=2 , snake_case__=0.02 , snake_case__=3 , snake_case__=4 , snake_case__="last" , snake_case__=None , snake_case__=None , ):
'''simple docstring'''
lowercase__ : Optional[int]= parent
lowercase__ : Tuple= batch_size
lowercase__ : Tuple= seq_length
lowercase__ : str= is_training
lowercase__ : str= use_input_lengths
lowercase__ : Any= use_token_type_ids
lowercase__ : List[Any]= use_labels
lowercase__ : Optional[int]= gelu_activation
lowercase__ : str= sinusoidal_embeddings
lowercase__ : List[str]= causal
lowercase__ : Any= asm
lowercase__ : Optional[int]= n_langs
lowercase__ : Union[str, Any]= vocab_size
lowercase__ : int= n_special
lowercase__ : Any= hidden_size
lowercase__ : int= num_hidden_layers
lowercase__ : List[str]= num_attention_heads
lowercase__ : List[str]= hidden_dropout_prob
lowercase__ : str= attention_probs_dropout_prob
lowercase__ : Any= max_position_embeddings
lowercase__ : List[Any]= type_vocab_size
lowercase__ : int= type_sequence_label_size
lowercase__ : Any= initializer_range
lowercase__ : Optional[int]= num_labels
lowercase__ : Union[str, Any]= num_choices
lowercase__ : List[Any]= summary_type
lowercase__ : Optional[int]= use_proj
lowercase__ : int= scope
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : Dict= random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : Tuple= None
if self.use_input_lengths:
lowercase__ : List[Any]= (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowercase__ : Tuple= None
if self.use_token_type_ids:
lowercase__ : Any= ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
lowercase__ : str= None
lowercase__ : Tuple= None
lowercase__ : Dict= None
if self.use_labels:
lowercase__ : Optional[Any]= ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : Optional[Any]= ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__ : Tuple= ids_tensor([self.batch_size] , 2 ).float()
lowercase__ : Tuple= ids_tensor([self.batch_size] , self.num_choices )
lowercase__ : List[Any]= self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : Any= FlaubertModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : str= model(snake_case__ , lengths=snake_case__ , langs=snake_case__ )
lowercase__ : str= model(snake_case__ , langs=snake_case__ )
lowercase__ : Any= model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : str= FlaubertWithLMHeadModel(snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : Optional[Any]= model(snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : str= FlaubertForQuestionAnsweringSimple(snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : List[str]= model(snake_case__ )
lowercase__ : Dict= model(snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : List[Any]= FlaubertForQuestionAnswering(snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : Dict= model(snake_case__ )
lowercase__ : Any= model(
snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , cls_index=snake_case__ , is_impossible=snake_case__ , p_mask=snake_case__ , )
lowercase__ : List[str]= model(
snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , cls_index=snake_case__ , is_impossible=snake_case__ , )
((lowercase__), ) : Optional[Any]= result_with_labels.to_tuple()
lowercase__ : Union[str, Any]= model(snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ )
((lowercase__), ) : List[Any]= result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : List[str]= FlaubertForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : Optional[Any]= model(snake_case__ )
lowercase__ : Optional[Any]= model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : List[Any]= self.num_labels
lowercase__ : Union[str, Any]= FlaubertForTokenClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : int= model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
lowercase__ : int= self.num_choices
lowercase__ : str= FlaubertForMultipleChoice(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : Dict= input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : int= token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : str= input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : Any= model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.prepare_config_and_inputs()
(
(
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
),
) : Any= config_and_inputs
lowercase__ : Tuple= {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"lengths": input_lengths,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
__lowerCamelCase = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
__lowerCamelCase = (
{
"feature-extraction": FlaubertModel,
"fill-mask": FlaubertWithLMHeadModel,
"question-answering": FlaubertForQuestionAnsweringSimple,
"text-classification": FlaubertForSequenceClassification,
"token-classification": FlaubertForTokenClassification,
"zero-shot": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__=False ):
'''simple docstring'''
lowercase__ : Tuple= super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
lowercase__ : List[Any]= torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case__ )
lowercase__ : List[str]= torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case__ )
return inputs_dict
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= FlaubertModelTester(self )
lowercase__ : List[str]= ConfigTester(self , config_class=snake_case__ , emb_dim=37 )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Any= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[int]= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Tuple= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[str]= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Union[str, Any]= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Tuple= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*snake_case__ )
@slow
def UpperCAmelCase_ ( self ):
'''simple docstring'''
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : List[str]= FlaubertModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
@slow
@require_torch_gpu
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__, lowercase__ : Optional[Any]= self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
lowercase__ : int= True
lowercase__ : List[Any]= model_class(config=snake_case__ )
lowercase__ : str= self._prepare_for_class(snake_case__ , snake_case__ )
lowercase__ : Dict= torch.jit.trace(
snake_case__ , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(snake_case__ , os.path.join(snake_case__ , "traced_model.pt" ) )
lowercase__ : str= torch.jit.load(os.path.join(snake_case__ , "traced_model.pt" ) , map_location=snake_case__ )
loaded(inputs_dict["input_ids"].to(snake_case__ ) , inputs_dict["attention_mask"].to(snake_case__ ) )
@require_torch
class __UpperCAmelCase( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= FlaubertModel.from_pretrained("flaubert/flaubert_base_cased" )
lowercase__ : Tuple= torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
with torch.no_grad():
lowercase__ : Optional[int]= model(snake_case__ )[0]
lowercase__ : Optional[int]= torch.Size((1, 11, 768) )
self.assertEqual(output.shape , snake_case__ )
lowercase__ : Dict= torch.tensor(
[[[-2.62_51, -1.42_98, -0.02_27], [-2.85_10, -1.63_87, 0.22_58], [-2.81_14, -1.18_32, -0.30_66]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case__ , atol=1e-4 ) )
| 85 |
"""simple docstring"""
a : List[Any] = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"""
def lowercase__(A ) ->bytes:
"""simple docstring"""
if not isinstance(A , A ):
lowercase__ : Union[str, Any]= f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(A )
lowercase__ : str= "".join(bin(A )[2:].zfill(8 ) for byte in data )
lowercase__ : Tuple= len(A ) % 6 != 0
if padding_needed:
# The padding that will be added later
lowercase__ : Union[str, Any]= b"=" * ((6 - len(A ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(A ) % 6)
else:
lowercase__ : str= b""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(A ) , 6 ) ).encode()
+ padding
)
def lowercase__(A ) ->bytes:
"""simple docstring"""
if not isinstance(A , A ) and not isinstance(A , A ):
lowercase__ : str= (
"argument should be a bytes-like object or ASCII string, "
f'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(A )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(A , A ):
try:
lowercase__ : Optional[Any]= encoded_data.decode("utf-8" )
except UnicodeDecodeError:
raise ValueError("base64 encoded data should only contain ASCII characters" )
lowercase__ : List[Any]= encoded_data.count("=" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(A ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
lowercase__ : str= encoded_data[:-padding]
lowercase__ : Tuple= "".join(
bin(B64_CHARSET.index(A ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
lowercase__ : Tuple= "".join(
bin(B64_CHARSET.index(A ) )[2:].zfill(6 ) for char in encoded_data )
lowercase__ : Any= [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(A ) , 8 )
]
return bytes(A )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 85 | 1 |
"""simple docstring"""
from __future__ import annotations
from math import pi
def lowercase__(A , A , A ) ->dict[str, float]:
"""simple docstring"""
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if inductance < 0:
raise ValueError("Inductance cannot be negative" )
if frequency < 0:
raise ValueError("Frequency cannot be negative" )
if reactance < 0:
raise ValueError("Inductive reactance cannot be negative" )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 85 |
"""simple docstring"""
from __future__ import annotations
def lowercase__(A ) ->list[int]: # This function is recursive
"""simple docstring"""
lowercase__ : int= len(A )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
lowercase__ : str= array[0]
lowercase__ : Optional[Any]= False
lowercase__ : Any= 1
lowercase__ : list[int]= []
while not is_found and i < array_length:
if array[i] < pivot:
lowercase__ : Union[str, Any]= True
lowercase__ : List[str]= [element for element in array[i:] if element >= array[i]]
lowercase__ : Union[str, Any]= longest_subsequence(A )
if len(A ) > len(A ):
lowercase__ : List[str]= temp_array
else:
i += 1
lowercase__ : List[str]= [element for element in array[1:] if element >= pivot]
lowercase__ : List[str]= [pivot, *longest_subsequence(A )]
if len(A ) > len(A ):
return temp_array
else:
return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
| 85 | 1 |
"""simple docstring"""
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class __UpperCAmelCase:
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__=13 , snake_case__=7 , snake_case__=True , snake_case__=True , snake_case__=False , snake_case__=True , snake_case__=99 , snake_case__=64 , snake_case__=5 , snake_case__=4 , snake_case__=64 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=16 , snake_case__=2 , snake_case__=0.02 , snake_case__=3 , snake_case__=4 , snake_case__=None , ):
'''simple docstring'''
lowercase__ : Union[str, Any]= parent
lowercase__ : Any= batch_size
lowercase__ : List[Any]= seq_length
lowercase__ : List[str]= is_training
lowercase__ : int= use_input_mask
lowercase__ : Optional[Any]= use_token_type_ids
lowercase__ : Dict= use_labels
lowercase__ : Tuple= vocab_size
lowercase__ : List[Any]= hidden_size
lowercase__ : int= num_hidden_layers
lowercase__ : Union[str, Any]= num_attention_heads
lowercase__ : Any= intermediate_size
lowercase__ : str= hidden_act
lowercase__ : Any= hidden_dropout_prob
lowercase__ : Optional[Any]= attention_probs_dropout_prob
lowercase__ : Tuple= max_position_embeddings
lowercase__ : str= type_vocab_size
lowercase__ : List[Any]= type_sequence_label_size
lowercase__ : List[Any]= initializer_range
lowercase__ : Dict= num_labels
lowercase__ : List[Any]= num_choices
lowercase__ : int= scope
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return MPNetConfig.from_pretrained("microsoft/mpnet-base" )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : List[str]= None
if self.use_input_mask:
lowercase__ : Any= random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : Dict= None
lowercase__ : Optional[Any]= None
lowercase__ : List[Any]= None
if self.use_labels:
lowercase__ : Any= ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : Tuple= ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__ : List[str]= ids_tensor([self.batch_size] , self.num_choices )
lowercase__ : Union[str, Any]= self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return MPNetConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
lowercase__ : List[str]= MPNetModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : Dict= model(snake_case__ , snake_case__ )
lowercase__ : Optional[Any]= model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
lowercase__ : str= MPNetForQuestionAnswering(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : Optional[Any]= model(
snake_case__ , attention_mask=snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
lowercase__ : List[Any]= self.num_labels
lowercase__ : int= MPNetForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : int= model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.num_choices
lowercase__ : int= MPNetForMultipleChoice(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : Optional[int]= input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : int= input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : Optional[Any]= model(
snake_case__ , attention_mask=snake_case__ , labels=snake_case__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
lowercase__ : Any= self.num_labels
lowercase__ : Any= MPNetForTokenClassification(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__ : List[Any]= model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Union[str, Any]= self.prepare_config_and_inputs()
((lowercase__), (lowercase__), (lowercase__), (lowercase__), (lowercase__), (lowercase__)) : Optional[Any]= config_and_inputs
lowercase__ : Optional[int]= {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
__lowerCamelCase = (
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
__lowerCamelCase = (
{
"feature-extraction": MPNetModel,
"fill-mask": MPNetForMaskedLM,
"question-answering": MPNetForQuestionAnswering,
"text-classification": MPNetForSequenceClassification,
"token-classification": MPNetForTokenClassification,
"zero-shot": MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
__lowerCamelCase = False
__lowerCamelCase = True
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= MPNetModelTester(self )
lowercase__ : Union[str, Any]= ConfigTester(self , config_class=snake_case__ , hidden_size=37 )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[Any]= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_model(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[int]= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_sequence_classification(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[str]= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_multiple_choice(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[str]= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_token_classification(*snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Union[str, Any]= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_question_answering(*snake_case__ )
@require_torch
class __UpperCAmelCase( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[str]= MPNetModel.from_pretrained("microsoft/mpnet-base" )
lowercase__ : Tuple= torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
lowercase__ : Optional[int]= model(snake_case__ )[0]
lowercase__ : Optional[int]= torch.Size((1, 11, 768) )
self.assertEqual(output.shape , snake_case__ )
lowercase__ : Dict= torch.tensor(
[[[-0.05_50, 0.19_43, -0.07_40], [-0.05_62, 0.22_11, -0.05_79], [-0.04_37, 0.33_37, -0.06_41]]] )
# compare the actual values for a slice.
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case__ , atol=1e-4 ) )
| 85 |
"""simple docstring"""
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
a : int = argparse.ArgumentParser()
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--txt2img_unclip""",
default="""kakaobrain/karlo-v1-alpha""",
type=str,
required=False,
help="""The pretrained txt2img unclip.""",
)
a : List[str] = parser.parse_args()
a : List[str] = UnCLIPPipeline.from_pretrained(args.txtaimg_unclip)
a : Optional[Any] = CLIPImageProcessor()
a : List[str] = CLIPVisionModelWithProjection.from_pretrained("""openai/clip-vit-large-patch14""")
a : Tuple = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
| 85 | 1 |