| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 86–54.5k | int64 0–371 | stringlengths 87–49.2k | int64 0–349 | int64 0–1 |
"""simple docstring"""
from math import factorial, radians
def _snake_case ( _snake_case : float , _snake_case : int = 18 , _snake_case : int = 10 ) -> Tuple:
lowerCAmelCase : Dict = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
# Converting from degrees to radians
lowerCAmelCase : Dict = radians(A__ )
lowerCAmelCase : int = angle_in_radians
lowerCAmelCase : Tuple = 3
lowerCAmelCase : str = -1
for _ in range(A__ ):
result += (b * (angle_in_radians**a)) / factorial(A__ )
lowerCAmelCase : List[str] = -b # One positive term and the next will be negative and so on...
a += 2 # Increased by 2 for every term.
return round(A__ , A__ )
if __name__ == "__main__":
__import__('''doctest''').testmod()
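A quick sanity check of the series against math.sin; a minimal sketch that assumes the sin() defined above is in scope:

from math import isclose, sin as exact_sin

for degrees in (0.0, 30.0, 90.0, 180.0, -45.0, 720.5):
    # The series result is rounded to 10 places, so compare with a loose tolerance.
    assert isclose(sin(degrees), exact_sin(radians(degrees)), abs_tol=1e-9)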
| code_codestyle: 357 |
"""simple docstring"""
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
snake_case__ : List[str] = logging.get_logger(__name__)
class snake_case_( a__ ):
__UpperCamelCase = CLIPConfig
__UpperCamelCase = ['''CLIPEncoderLayer''']
def __init__( self : List[Any] , UpperCamelCase_ : CLIPConfig ):
super().__init__(UpperCamelCase_ )
lowerCAmelCase : str = CLIPVisionModelWithProjection(config.vision_config )
lowerCAmelCase : Any = nn.Linear(config.vision_config.projection_dim , 1 )
lowerCAmelCase : Dict = nn.Linear(config.vision_config.projection_dim , 1 )
@torch.no_grad()
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : int , UpperCamelCase_ : Any , UpperCamelCase_ : Dict=0.5 , UpperCamelCase_ : List[str]=0.5 ):
lowerCAmelCase : List[Any] = self.vision_model(UpperCamelCase_ )[0]
lowerCAmelCase : Tuple = self.p_head(UpperCamelCase_ )
lowerCAmelCase : Any = nsfw_detected.flatten()
lowerCAmelCase : Dict = nsfw_detected > p_threshold
lowerCAmelCase : int = nsfw_detected.tolist()
if any(UpperCamelCase_ ):
logger.warning(
'''Potential NSFW content was detected in one or more images. A black image will be returned instead.'''
''' Try again with a different prompt and/or seed.''' )
for idx, nsfw_detected_ in enumerate(UpperCamelCase_ ):
if nsfw_detected_:
lowerCAmelCase : List[Any] = np.zeros(images[idx].shape )
lowerCAmelCase : Union[str, Any] = self.w_head(UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = watermark_detected.flatten()
lowerCAmelCase : Optional[int] = watermark_detected > w_threshold
lowerCAmelCase : Union[str, Any] = watermark_detected.tolist()
if any(UpperCamelCase_ ):
logger.warning(
'''Potential watermarked content was detected in one or more images. A black image will be returned instead.'''
''' Try again with a different prompt and/or seed.''' )
for idx, watermark_detected_ in enumerate(UpperCamelCase_ ):
if watermark_detected_:
lowerCAmelCase : List[str] = np.zeros(images[idx].shape )
return images, nsfw_detected, watermark_detected
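A smoke-test sketch for the checker, using a default, randomly initialized CLIPConfig purely to get the tensor shapes right; with random weights the thresholds fire arbitrarily, so treat this as an API illustration, not the shipped checkpoint:

# Random weights: output booleans are meaningless, but shapes and flow are exercised.
config = CLIPConfig()  # default vision tower: 224x224 input, projection_dim 512
checker = IFSafetyChecker(config)
clip_input = torch.randn(2, 3, 224, 224)                  # pixel values for the CLIP tower
images = [np.random.rand(64, 64, 3) for _ in range(2)]    # the images being screened
images, nsfw, watermark = checker(clip_input, images, p_threshold=0.5, w_threshold=0.5)
print(nsfw, watermark)  # two booleans each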
| style_context_codestyle: 314 | label: 0 |
"""simple docstring"""
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class snake_case_:
@staticmethod
def lowerCamelCase__ ( *UpperCamelCase_ : List[Any] , **UpperCamelCase_ : Any ):
pass
@is_pipeline_test
@require_vision
class snake_case_( unittest.TestCase ):
@require_torch
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : int = pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , )
lowerCAmelCase : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCAmelCase : Optional[int] = image_classifier(__lowerCAmelCase , candidate_labels=['''a''', '''b''', '''c'''] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(__lowerCAmelCase ) , [
[{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}],
[{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''c'''}, {'''score''': 0.333, '''label''': '''b'''}],
] , )
lowerCAmelCase : Tuple = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [
[
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
],
] , )
@require_tf
def lowerCamelCase__ ( self : int ):
lowerCAmelCase : Optional[Any] = pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' )
lowerCAmelCase : Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCAmelCase : int = image_classifier(__lowerCAmelCase , candidate_labels=['''a''', '''b''', '''c'''] )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}] , )
lowerCAmelCase : int = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [
[
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
{'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
],
] , )
@slow
@require_torch
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase : Optional[Any] = pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , )
# This is an image of 2 cats with remotes and no planes
lowerCAmelCase : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCAmelCase : Union[str, Any] = image_classifier(__lowerCAmelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
] , )
lowerCAmelCase : Dict = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [
[
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
],
]
* 5 , )
@slow
@require_tf
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : Tuple = pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' )
# This is an image of 2 cats with remotes and no planes
lowerCAmelCase : Any = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCAmelCase : int = image_classifier(__lowerCAmelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
] , )
lowerCAmelCase : List[Any] = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [
[
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
],
]
* 5 , )
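For reference, the slow tests above exercise the public pipeline API directly; a minimal sketch (downloads openai/clip-vit-base-patch32 on first use):

from transformers import pipeline

classifier = pipeline(task="zero-shot-image-classification", model="openai/clip-vit-base-patch32")
predictions = classifier(
    "./tests/fixtures/tests_samples/COCO/000000039769.png",
    candidate_labels=["cat", "plane", "remote"],
)
# Each prediction is {"score": float, "label": str}, sorted by descending score.
print(predictions[0])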
| code_codestyle: 358 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
snake_case__ : str = logging.get_logger(__name__)
snake_case__ : List[str] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
snake_case__ : str = {
'''vocab_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/vocab.txt''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/vocab.txt''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'''
),
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'''
),
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt''',
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'''
),
'''bert-base-multilingual-cased''': (
'''https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-cased''': (
'''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'''
),
},
}
snake_case__ : Union[str, Any] = {
'''bert-base-uncased''': 512,
'''bert-large-uncased''': 512,
'''bert-base-cased''': 512,
'''bert-large-cased''': 512,
'''bert-base-multilingual-uncased''': 512,
'''bert-base-multilingual-cased''': 512,
'''bert-base-chinese''': 512,
'''bert-base-german-cased''': 512,
'''bert-large-uncased-whole-word-masking''': 512,
'''bert-large-cased-whole-word-masking''': 512,
'''bert-large-uncased-whole-word-masking-finetuned-squad''': 512,
'''bert-large-cased-whole-word-masking-finetuned-squad''': 512,
'''bert-base-cased-finetuned-mrpc''': 512,
'''bert-base-german-dbmdz-cased''': 512,
'''bert-base-german-dbmdz-uncased''': 512,
'''TurkuNLP/bert-base-finnish-cased-v1''': 512,
'''TurkuNLP/bert-base-finnish-uncased-v1''': 512,
'''wietsedv/bert-base-dutch-cased''': 512,
}
snake_case__ : Optional[Any] = {
'''bert-base-uncased''': {'''do_lower_case''': True},
'''bert-large-uncased''': {'''do_lower_case''': True},
'''bert-base-cased''': {'''do_lower_case''': False},
'''bert-large-cased''': {'''do_lower_case''': False},
'''bert-base-multilingual-uncased''': {'''do_lower_case''': True},
'''bert-base-multilingual-cased''': {'''do_lower_case''': False},
'''bert-base-chinese''': {'''do_lower_case''': False},
'''bert-base-german-cased''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': False},
'''bert-base-cased-finetuned-mrpc''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-cased''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-uncased''': {'''do_lower_case''': True},
'''TurkuNLP/bert-base-finnish-cased-v1''': {'''do_lower_case''': False},
'''TurkuNLP/bert-base-finnish-uncased-v1''': {'''do_lower_case''': True},
'''wietsedv/bert-base-dutch-cased''': {'''do_lower_case''': False},
}
class snake_case_( a__ ):
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_INIT_CONFIGURATION
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = BertTokenizer
def __init__( self : int , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : str=True , UpperCamelCase_ : Dict="[UNK]" , UpperCamelCase_ : Any="[SEP]" , UpperCamelCase_ : Any="[PAD]" , UpperCamelCase_ : Tuple="[CLS]" , UpperCamelCase_ : List[Any]="[MASK]" , UpperCamelCase_ : Optional[Any]=True , UpperCamelCase_ : Tuple=None , **UpperCamelCase_ : Optional[int] , ):
super().__init__(
UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , do_lower_case=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , tokenize_chinese_chars=UpperCamelCase_ , strip_accents=UpperCamelCase_ , **UpperCamelCase_ , )
lowerCAmelCase : Any = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , UpperCamelCase_ ) != do_lower_case
or normalizer_state.get('''strip_accents''' , UpperCamelCase_ ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , UpperCamelCase_ ) != tokenize_chinese_chars
):
lowerCAmelCase : Optional[int] = getattr(UpperCamelCase_ , normalizer_state.pop('''type''' ) )
lowerCAmelCase : Tuple = do_lower_case
lowerCAmelCase : Union[str, Any] = strip_accents
lowerCAmelCase : Tuple = tokenize_chinese_chars
lowerCAmelCase : str = normalizer_class(**UpperCamelCase_ )
lowerCAmelCase : Optional[int] = do_lower_case
def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple=None ):
lowerCAmelCase : List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ):
lowerCAmelCase : Optional[Any] = [self.sep_token_id]
lowerCAmelCase : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ):
lowerCAmelCase : str = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_ )
return tuple(UpperCamelCase_ )
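A short usage sketch for the special-token helpers above, assuming the standard bert-base-uncased checkpoint is downloadable:

tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
first = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("hello world"))
second = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("goodbye"))
# [CLS] first [SEP] second [SEP]; segment ids are 0 for the first block, 1 for the second.
input_ids = tokenizer.build_inputs_with_special_tokens(first, second)
token_type_ids = tokenizer.create_token_type_ids_from_sequences(first, second)
assert len(input_ids) == len(token_type_ids)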
| style_context_codestyle: 314 | label: 0 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class snake_case_( SCREAMING_SNAKE_CASE__ ):
pass
class snake_case_:
def __init__( self : Optional[int] , UpperCamelCase_ : Any ):
lowerCAmelCase : Dict = data
lowerCAmelCase : List[str] = None
def __iter__( self : List[str] ):
lowerCAmelCase : Dict = self
lowerCAmelCase : List[Any] = []
while node:
if node in visited:
raise ContainsLoopError
visited.append(a_ )
yield node.data
lowerCAmelCase : Union[str, Any] = node.next_node
@property
def lowerCamelCase__ ( self : List[Any] ):
try:
list(self )
return False
except ContainsLoopError:
return True
if __name__ == "__main__":
snake_case__ : List[Any] = Node(1)
snake_case__ : List[Any] = Node(2)
snake_case__ : Any = Node(3)
snake_case__ : Optional[int] = Node(4)
print(root_node.has_loop) # False
snake_case__ : Dict = root_node.next_node
print(root_node.has_loop) # True
snake_case__ : List[str] = Node(5)
snake_case__ : str = Node(6)
snake_case__ : Tuple = Node(5)
snake_case__ : Tuple = Node(6)
print(root_node.has_loop) # False
snake_case__ : Any = Node(1)
print(root_node.has_loop) # False
| code_codestyle: 359 |
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class snake_case_( a__ ):
__UpperCamelCase = (DDPMScheduler,)
def lowerCamelCase__ ( self : List[Any] , **UpperCamelCase_ : Union[str, Any] ):
lowerCAmelCase : Optional[Any] = {
'''num_train_timesteps''': 1_0_0_0,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**UpperCamelCase_ )
return config
def lowerCamelCase__ ( self : Optional[int] ):
for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=UpperCamelCase_ )
def lowerCamelCase__ ( self : Tuple ):
for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=UpperCamelCase_ , beta_end=UpperCamelCase_ )
def lowerCamelCase__ ( self : str ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=UpperCamelCase_ )
def lowerCamelCase__ ( self : Optional[Any] ):
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=UpperCamelCase_ )
def lowerCamelCase__ ( self : Optional[int] ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=UpperCamelCase_ )
def lowerCamelCase__ ( self : Any ):
self.check_over_configs(thresholding=UpperCamelCase_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=UpperCamelCase_ , prediction_type=UpperCamelCase_ , sample_max_value=UpperCamelCase_ , )
def lowerCamelCase__ ( self : Tuple ):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCamelCase_ )
def lowerCamelCase__ ( self : str ):
for t in [0, 5_0_0, 9_9_9]:
self.check_over_forward(time_step=UpperCamelCase_ )
def lowerCamelCase__ ( self : int ):
lowerCAmelCase : str = self.scheduler_classes[0]
lowerCAmelCase : Dict = self.get_scheduler_config()
lowerCAmelCase : Dict = scheduler_class(**UpperCamelCase_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.00_979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.02 ) ) < 1E-5
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase : List[Any] = self.scheduler_classes[0]
lowerCAmelCase : List[Any] = self.get_scheduler_config()
lowerCAmelCase : List[str] = scheduler_class(**UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = len(UpperCamelCase_ )
lowerCAmelCase : List[str] = self.dummy_model()
lowerCAmelCase : Union[str, Any] = self.dummy_sample_deter
lowerCAmelCase : List[Any] = torch.manual_seed(0 )
for t in reversed(range(UpperCamelCase_ ) ):
# 1. predict noise residual
lowerCAmelCase : Optional[int] = model(UpperCamelCase_ , UpperCamelCase_ )
# 2. predict previous mean of sample x_t-1
lowerCAmelCase : Optional[Any] = scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCAmelCase : Union[str, Any] = pred_prev_sample
lowerCAmelCase : str = torch.sum(torch.abs(UpperCamelCase_ ) )
lowerCAmelCase : int = torch.mean(torch.abs(UpperCamelCase_ ) )
assert abs(result_sum.item() - 258.9_606 ) < 1E-2
assert abs(result_mean.item() - 0.3_372 ) < 1E-3
def lowerCamelCase__ ( self : Any ):
lowerCAmelCase : Optional[int] = self.scheduler_classes[0]
lowerCAmelCase : Any = self.get_scheduler_config(prediction_type='''v_prediction''' )
lowerCAmelCase : Tuple = scheduler_class(**UpperCamelCase_ )
lowerCAmelCase : Dict = len(UpperCamelCase_ )
lowerCAmelCase : Any = self.dummy_model()
lowerCAmelCase : Any = self.dummy_sample_deter
lowerCAmelCase : List[Any] = torch.manual_seed(0 )
for t in reversed(range(UpperCamelCase_ ) ):
# 1. predict noise residual
lowerCAmelCase : str = model(UpperCamelCase_ , UpperCamelCase_ )
# 2. predict previous mean of sample x_t-1
lowerCAmelCase : List[Any] = scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCAmelCase : List[Any] = pred_prev_sample
lowerCAmelCase : List[str] = torch.sum(torch.abs(UpperCamelCase_ ) )
lowerCAmelCase : Dict = torch.mean(torch.abs(UpperCamelCase_ ) )
assert abs(result_sum.item() - 202.0_296 ) < 1E-2
assert abs(result_mean.item() - 0.2_631 ) < 1E-3
def lowerCamelCase__ ( self : Any ):
lowerCAmelCase : Dict = self.scheduler_classes[0]
lowerCAmelCase : Tuple = self.get_scheduler_config()
lowerCAmelCase : int = scheduler_class(**UpperCamelCase_ )
lowerCAmelCase : List[Any] = [1_0_0, 8_7, 5_0, 1, 0]
scheduler.set_timesteps(timesteps=UpperCamelCase_ )
lowerCAmelCase : Dict = scheduler.timesteps
for i, timestep in enumerate(UpperCamelCase_ ):
if i == len(UpperCamelCase_ ) - 1:
lowerCAmelCase : List[Any] = -1
else:
lowerCAmelCase : Union[str, Any] = timesteps[i + 1]
lowerCAmelCase : Any = scheduler.previous_timestep(UpperCamelCase_ )
lowerCAmelCase : Dict = prev_t.item()
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : Union[str, Any] = self.scheduler_classes[0]
lowerCAmelCase : List[Any] = self.get_scheduler_config()
lowerCAmelCase : Tuple = scheduler_class(**UpperCamelCase_ )
lowerCAmelCase : int = [1_0_0, 8_7, 5_0, 5_1, 0]
with self.assertRaises(UpperCamelCase_ , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=UpperCamelCase_ )
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase : Any = self.scheduler_classes[0]
lowerCAmelCase : Optional[int] = self.get_scheduler_config()
lowerCAmelCase : str = scheduler_class(**UpperCamelCase_ )
lowerCAmelCase : List[str] = [1_0_0, 8_7, 5_0, 1, 0]
lowerCAmelCase : int = len(UpperCamelCase_ )
with self.assertRaises(UpperCamelCase_ , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=UpperCamelCase_ , timesteps=UpperCamelCase_ )
def lowerCamelCase__ ( self : Optional[Any] ):
lowerCAmelCase : List[Any] = self.scheduler_classes[0]
lowerCAmelCase : Tuple = self.get_scheduler_config()
lowerCAmelCase : Dict = scheduler_class(**UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
UpperCamelCase_ , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' , ):
scheduler.set_timesteps(timesteps=UpperCamelCase_ )
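For context, the denoising loop these tests mirror looks like the sketch below when used outside the harness; the residual here is a stand-in (any callable returning a tensor of the sample's shape would do), not a trained UNet:

import torch
from diffusers import DDPMScheduler

scheduler = DDPMScheduler(num_train_timesteps=1000, beta_schedule="linear")
scheduler.set_timesteps(50)  # 50 inference steps instead of all 1000

sample = torch.randn(1, 3, 32, 32)  # start from pure noise
generator = torch.manual_seed(0)
for t in scheduler.timesteps:
    residual = -sample  # stand-in for unet(sample, t).sample
    sample = scheduler.step(residual, t, sample, generator=generator).prev_sample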
| style_context_codestyle: 314 | label: 0 |
"""simple docstring"""
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case_( a__ , unittest.TestCase ):
__UpperCamelCase = RobertaTokenizer
__UpperCamelCase = RobertaTokenizerFast
__UpperCamelCase = True
__UpperCamelCase = {'cls_token': '<s>'}
def lowerCamelCase__ ( self : str ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCAmelCase : str = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
lowerCAmelCase : int = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) )
lowerCAmelCase : Dict = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
lowerCAmelCase : Dict = {'''unk_token''': '''<unk>'''}
lowerCAmelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__lowerCAmelCase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__lowerCAmelCase ) )
def lowerCamelCase__ ( self : Dict , **UpperCamelCase_ : Tuple ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def lowerCamelCase__ ( self : str , **UpperCamelCase_ : Optional[Any] ):
kwargs.update(self.special_tokens_map )
return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Dict ):
lowerCAmelCase : Optional[int] = '''lower newer'''
lowerCAmelCase : Optional[int] = '''lower newer'''
return input_text, output_text
def lowerCamelCase__ ( self : int ):
lowerCAmelCase : Dict = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowerCAmelCase : int = '''lower newer'''
lowerCAmelCase : Any = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
lowerCAmelCase : int = tokenizer.tokenize(__lowerCAmelCase ) # , add_prefix_space=True)
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
lowerCAmelCase : int = tokens + [tokenizer.unk_token]
lowerCAmelCase : Any = [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , __lowerCAmelCase )
def lowerCamelCase__ ( self : Any ):
lowerCAmelCase : Union[str, Any] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=__lowerCAmelCase ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=__lowerCAmelCase ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2] , )
@slow
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase : List[str] = self.tokenizer_class.from_pretrained('''roberta-base''' )
lowerCAmelCase : Optional[Any] = tokenizer.encode('''sequence builders''' , add_special_tokens=__lowerCAmelCase )
lowerCAmelCase : str = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__lowerCAmelCase )
lowerCAmelCase : Any = tokenizer.encode(
'''sequence builders''' , add_special_tokens=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase )
lowerCAmelCase : Any = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase )
lowerCAmelCase : Dict = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase )
lowerCAmelCase : Optional[int] = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase , __lowerCAmelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def lowerCamelCase__ ( self : int ):
lowerCAmelCase : Any = self.get_tokenizer()
lowerCAmelCase : Optional[Any] = '''Encode this sequence.'''
lowerCAmelCase : Optional[int] = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
# Testing encoder arguments
lowerCAmelCase : str = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase )
lowerCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__lowerCAmelCase , __lowerCAmelCase )
lowerCAmelCase : List[str] = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase )
lowerCAmelCase : Dict = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
lowerCAmelCase : Dict = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
lowerCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__lowerCAmelCase , __lowerCAmelCase )
# Testing spaces after special tokens
lowerCAmelCase : Dict = '''<mask>'''
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase )} ) # mask token has a left space
lowerCAmelCase : str = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
lowerCAmelCase : str = '''Encode <mask> sequence'''
lowerCAmelCase : Optional[Any] = '''Encode <mask>sequence'''
lowerCAmelCase : Dict = tokenizer.encode(__lowerCAmelCase )
lowerCAmelCase : List[str] = encoded.index(__lowerCAmelCase )
lowerCAmelCase : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
lowerCAmelCase : Union[str, Any] = tokenizer.encode(__lowerCAmelCase )
lowerCAmelCase : Optional[Any] = encoded.index(__lowerCAmelCase )
lowerCAmelCase : int = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__lowerCAmelCase , __lowerCAmelCase )
def lowerCamelCase__ ( self : Any ):
pass
def lowerCamelCase__ ( self : List[str] ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCAmelCase : str = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
lowerCAmelCase : str = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
lowerCAmelCase : List[Any] = '''A, <mask> AllenNLP sentence.'''
lowerCAmelCase : Optional[Any] = tokenizer_r.encode_plus(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase )
lowerCAmelCase : Dict = tokenizer_p.encode_plus(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
lowerCAmelCase : Tuple = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
lowerCAmelCase : Any = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(
__lowerCAmelCase , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
__lowerCAmelCase , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def lowerCamelCase__ ( self : List[str] ):
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
lowerCAmelCase : Any = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
lowerCAmelCase : Union[str, Any] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
lowerCAmelCase : Union[str, Any] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , __lowerCAmelCase )
self.assertEqual(post_processor_state['''add_prefix_space'''] , __lowerCAmelCase )
self.assertEqual(post_processor_state['''trim_offsets'''] , __lowerCAmelCase )
def lowerCamelCase__ ( self : str ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCAmelCase : Any = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
lowerCAmelCase : Union[str, Any] = F'''{text_of_1_token} {text_of_1_token}'''
lowerCAmelCase : List[str] = self.rust_tokenizer_class.from_pretrained(
__lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
lowerCAmelCase : int = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCAmelCase ) + 1, len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , )
lowerCAmelCase : Any = self.rust_tokenizer_class.from_pretrained(
__lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
lowerCAmelCase : List[Any] = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCAmelCase ) + 1, len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , )
lowerCAmelCase : Dict = self.rust_tokenizer_class.from_pretrained(
__lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
lowerCAmelCase : List[Any] = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCAmelCase ), len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , )
lowerCAmelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
__lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
lowerCAmelCase : List[Any] = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCAmelCase ), len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , )
lowerCAmelCase : Union[str, Any] = F''' {text}'''
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
lowerCAmelCase : str = self.rust_tokenizer_class.from_pretrained(
__lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
lowerCAmelCase : List[Any] = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCAmelCase ) + 1, 1 + len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , )
lowerCAmelCase : Tuple = self.rust_tokenizer_class.from_pretrained(
__lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
lowerCAmelCase : Optional[int] = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCAmelCase ), 1 + len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , )
lowerCAmelCase : Tuple = self.rust_tokenizer_class.from_pretrained(
__lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
lowerCAmelCase : Any = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCAmelCase ), 1 + len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , )
| code_codestyle: 360 |
"""simple docstring"""
def _snake_case ( _snake_case : int = 50000000 ):
lowerCAmelCase : List[str] = set()
lowerCAmelCase : List[Any] = int((limit - 24) ** (1 / 2) )
lowerCAmelCase : Optional[int] = set(range(3 , prime_square_limit + 1 , 2 ) )
primes.add(2 )
for p in range(3 , prime_square_limit + 1 , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , prime_square_limit + 1 , _snake_case ) ) )
for primea in primes:
lowerCAmelCase : Optional[Any] = primea * primea
for primea in primes:
lowerCAmelCase : List[Any] = primea * primea * primea
if square + cube >= limit - 16:
break
for primea in primes:
lowerCAmelCase : Tuple = primea * primea * primea * primea
lowerCAmelCase : Tuple = square + cube + tetr
if total >= limit:
break
ret.add(_snake_case )
return len(_snake_case )
if __name__ == "__main__":
print(f"""{solution() = }""")
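Project Euler 87 gives a small fixture for checking the function: below fifty there are exactly four numbers expressible as a prime square plus a prime cube plus a prime fourth power, namely 28 = 2^2 + 2^3 + 2^4, 33, 47, and 49.

# Quick check against the worked example from the Project Euler 87 statement.
assert solution(50) == 4  # {28, 33, 47, 49}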
| style_context_codestyle: 314 | label: 0 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
snake_case__ : Union[str, Any] = '\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n'
class snake_case_( unittest.TestCase , a__ ):
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase : str = load_tool('''text-question-answering''' )
self.tool.setup()
lowerCAmelCase : Any = load_tool('''text-question-answering''' , remote=_snake_case )
def lowerCamelCase__ ( self : Optional[int] ):
lowerCAmelCase : Union[str, Any] = self.tool(_snake_case , '''What did Hugging Face do in April 2021?''' )
self.assertEqual(_snake_case , '''launched the BigScience Research Workshop''' )
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase : Optional[int] = self.remote_tool(_snake_case , '''What did Hugging Face do in April 2021?''' )
self.assertEqual(_snake_case , '''launched the BigScience Research Workshop''' )
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : int = self.tool(text=_snake_case , question='''What did Hugging Face do in April 2021?''' )
self.assertEqual(_snake_case , '''launched the BigScience Research Workshop''' )
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase : List[Any] = self.remote_tool(text=_snake_case , question='''What did Hugging Face do in April 2021?''' )
self.assertEqual(_snake_case , '''launched the BigScience Research Workshop''' )
| code_codestyle: 361 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
snake_case__ : Tuple = {
'''configuration_maskformer''': ['''MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MaskFormerConfig'''],
'''configuration_maskformer_swin''': ['''MaskFormerSwinConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : List[Any] = ['''MaskFormerFeatureExtractor''']
snake_case__ : List[Any] = ['''MaskFormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : Dict = [
'''MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MaskFormerForInstanceSegmentation''',
'''MaskFormerModel''',
'''MaskFormerPreTrainedModel''',
]
snake_case__ : Optional[Any] = [
'''MaskFormerSwinBackbone''',
'''MaskFormerSwinModel''',
'''MaskFormerSwinPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
snake_case__ : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| style_context_codestyle: 314 | label: 0 |
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v2_1.4_224": "https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json",
    "google/mobilenet_v2_1.0_224": "https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json",
    "google/mobilenet_v2_0.75_160": "https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json",
    "google/mobilenet_v2_0.35_96": "https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json",
    # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}


class MobileNetV2Config(PretrainedConfig):
    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class MobileNetV2OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
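A brief sketch of how the two classes fit together; the printed values follow directly from the properties above:

config = MobileNetV2Config(depth_multiplier=1.0, image_size=224)
onnx_config = MobileNetV2OnnxConfig(config)
print(onnx_config.inputs)               # OrderedDict([('pixel_values', {0: 'batch'})])
print(onnx_config.outputs)              # default task: last_hidden_state + pooler_output
print(onnx_config.atol_for_validation)  # 1e-4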
| code_codestyle: 362 |
"""simple docstring"""
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class snake_case_:
def __init__( self : Dict , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : int=sys.maxsize ):
lowerCAmelCase : Tuple = '''bilinear'''
lowerCAmelCase : List[Any] = max_size
lowerCAmelCase : Optional[int] = short_edge_length
def __call__( self : Optional[int] , UpperCamelCase_ : Optional[int] ):
lowerCAmelCase : Tuple = []
for img in imgs:
lowerCAmelCase, lowerCAmelCase : List[str] = img.shape[:2]
# later: provide list and randomly choose index for resize
lowerCAmelCase : int = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
if size == 0:
return img
lowerCAmelCase : Optional[Any] = size * 1.0 / min(UpperCamelCase_ , UpperCamelCase_ )
if h < w:
lowerCAmelCase, lowerCAmelCase : List[str] = size, scale * w
else:
lowerCAmelCase, lowerCAmelCase : int = scale * h, size
if max(UpperCamelCase_ , UpperCamelCase_ ) > self.max_size:
lowerCAmelCase : Union[str, Any] = self.max_size * 1.0 / max(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : Tuple = newh * scale
lowerCAmelCase : str = neww * scale
lowerCAmelCase : Union[str, Any] = int(neww + 0.5 )
lowerCAmelCase : str = int(newh + 0.5 )
if img.dtype == np.uinta:
lowerCAmelCase : Tuple = Image.fromarray(UpperCamelCase_ )
lowerCAmelCase : Optional[int] = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
lowerCAmelCase : Union[str, Any] = np.asarray(UpperCamelCase_ )
else:
lowerCAmelCase : List[str] = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw
lowerCAmelCase : Optional[int] = nn.functional.interpolate(
UpperCamelCase_ , (newh, neww) , mode=self.interp_method , align_corners=UpperCamelCase_ ).squeeze(0 )
img_augs.append(UpperCamelCase_ )
return img_augs
class snake_case_:
def __init__( self : Tuple , UpperCamelCase_ : Any ):
lowerCAmelCase : Any = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
lowerCAmelCase : List[Any] = cfg.INPUT.FORMAT
lowerCAmelCase : Tuple = cfg.SIZE_DIVISIBILITY
lowerCAmelCase : int = cfg.PAD_VALUE
lowerCAmelCase : Union[str, Any] = cfg.INPUT.MAX_SIZE_TEST
lowerCAmelCase : Union[str, Any] = cfg.MODEL.DEVICE
lowerCAmelCase : Union[str, Any] = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
lowerCAmelCase : List[Any] = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
lowerCAmelCase : Optional[int] = lambda UpperCamelCase_ : (x - self.pixel_mean) / self.pixel_std
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : List[Any] ):
lowerCAmelCase : Dict = tuple(max(UpperCamelCase_ ) for s in zip(*[img.shape for img in images] ) )
lowerCAmelCase : Dict = [im.shape[-2:] for im in images]
lowerCAmelCase : Dict = [
nn.functional.pad(
UpperCamelCase_ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(UpperCamelCase_ , UpperCamelCase_ )
]
return torch.stack(UpperCamelCase_ ), torch.tensor(UpperCamelCase_ )
def __call__( self : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[int]=False ):
with torch.no_grad():
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
lowerCAmelCase : List[Any] = [images]
if single_image:
assert len(UpperCamelCase_ ) == 1
for i in range(len(UpperCamelCase_ ) ):
if isinstance(images[i] , torch.Tensor ):
images.insert(UpperCamelCase_ , images.pop(UpperCamelCase_ ).to(self.device ).float() )
elif not isinstance(images[i] , torch.Tensor ):
images.insert(
UpperCamelCase_ , torch.as_tensor(img_tensorize(images.pop(UpperCamelCase_ ) , input_format=self.input_format ) )
.to(self.device )
.float() , )
# resize smallest edge
lowerCAmelCase : Dict = torch.tensor([im.shape[:2] for im in images] )
lowerCAmelCase : str = self.aug(UpperCamelCase_ )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
lowerCAmelCase : int = [self.normalizer(UpperCamelCase_ ) for x in images]
# now pad them to do the following operations
lowerCAmelCase, lowerCAmelCase : Optional[Any] = self.pad(UpperCamelCase_ )
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
lowerCAmelCase : Union[str, Any] = torch.true_divide(UpperCamelCase_ , UpperCamelCase_ )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def _snake_case ( _snake_case : str , _snake_case : List[Any] ):
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def _snake_case ( _snake_case : Any , _snake_case : Tuple[int, int] ):
assert torch.isfinite(_snake_case ).all(), "Box tensor contains infinite or NaN!"
lowerCAmelCase, lowerCAmelCase : Optional[int] = box_size
tensor[:, 0].clamp_(min=0 , max=_snake_case )
tensor[:, 1].clamp_(min=0 , max=_snake_case )
tensor[:, 2].clamp_(min=0 , max=_snake_case )
tensor[:, 3].clamp_(min=0 , max=_snake_case )
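A hedged usage sketch for ResizeShortestEdge on its own (Preprocess needs a full detectron2-style cfg object, so it is not constructed here); the shape in the comment assumes the 480x640 input below:

import numpy as np

resize = ResizeShortestEdge(short_edge_length=[800, 800], max_size=1333)
batch = [np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)]
resized = resize(batch)
print(resized[0].shape)  # shortest edge scaled to 800: (800, 1067, 3)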
| style_context_codestyle: 314 | label: 0 |
"""simple docstring"""
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
snake_case__ : int = False
snake_case__ : List[Any] = logging.get_logger(__name__)
snake_case__ : Optional[Any] = '''ybelkada/fonts'''
def _snake_case ( ):
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
f'''You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use '''
'''Pix2StructImageProcessor. Please upgrade torch.''' )
def _snake_case ( _snake_case : Optional[int] , _snake_case : Dict , _snake_case : List[str] ):
requires_backends(UpperCamelCase__ , ['''torch'''] )
_check_torch_version()
lowerCAmelCase : Any = image_tensor.unsqueeze(0 )
lowerCAmelCase : Any = torch.nn.functional.unfold(UpperCamelCase__ , (patch_height, patch_width) , stride=(patch_height, patch_width) )
lowerCAmelCase : Optional[Any] = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , UpperCamelCase__ , UpperCamelCase__ , -1 )
lowerCAmelCase : Optional[int] = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape(
image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , )
return patches.unsqueeze(0 )
def render_text(
    text: str,
    text_size: int = 36,
    text_color: str = "black",
    background_color: str = "white",
    left_padding: int = 5,
    right_padding: int = 5,
    top_padding: int = 5,
    bottom_padding: int = 5,
    font_bytes: Optional[bytes] = None,
    font_path: Optional[str] = None,
) -> Image.Image:
    requires_backends(render_text, "vision")
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = "\n".join(lines)

    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH, "Arial.TTF")
    font = ImageFont.truetype(font, encoding="UTF-8", size=text_size)

    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)

    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("RGB", (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image
def render_header(image: np.ndarray, header: str, **kwargs):
    requires_backends(render_header, "vision")

    # Convert to PIL image if necessary
    image = to_pil_image(image)

    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)

    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))

    new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white")
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))

    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)

    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)

    return new_image
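
# Hedged usage sketch (function name and inputs are ours): render a VQA
# question as a text banner and stack it on top of an image, which is exactly
# what the VQA branch of the processor below does before patch extraction.
def _demo_render_header():
    demo_image = np.zeros((64, 64, 3), dtype=np.uint8)
    return render_header(demo_image, "What is shown in this figure?", text_size=24)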
class Pix2StructImageProcessor( BaseImageProcessor ):
    model_input_names = ["flattened_patches"]

    def __init__(
        self,
        do_convert_rgb: bool = True,
        do_normalize: bool = True,
        patch_size: Dict[str, int] = None,
        max_patches: int = 2048,
        is_vqa: bool = False,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa
    def extract_flattened_patches(self, image: np.ndarray, max_patches: int, patch_size: dict, **kwargs) -> np.ndarray:
        requires_backends(self.extract_flattened_patches, "torch")
        _check_torch_version()

        # convert to torch
        image = to_channel_dimension_format(image, ChannelDimension.FIRST)
        image = torch.from_numpy(image)

        patch_height, patch_width = patch_size["height"], patch_size["width"]
        image_height, image_width = get_image_size(image)

        # maximize scale s.t.
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
        resized_height = max(num_feasible_rows * patch_height, 1)
        resized_width = max(num_feasible_cols * patch_width, 1)

        image = torch.nn.functional.interpolate(
            image.unsqueeze(0),
            size=(resized_height, resized_width),
            mode="bilinear",
            align_corners=False,
            antialias=True,
        ).squeeze(0)

        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image, patch_height, patch_width)

        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]

        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth])

        # [rows * columns, 1]
        row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
        col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])

        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1

        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)

        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches], -1)

        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()

        result = to_numpy_array(result)

        return result
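
    # Worked example of the id layout produced above (ours, not part of the
    # original file): for a 2x3 patch grid,
    #   row_ids -> [1, 1, 1, 2, 2, 2] and col_ids -> [1, 2, 3, 1, 2, 3],
    # so padded rows (all zeros, ids included) stay distinguishable from real
    # patches, whose position ids start at 1.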
    def normalize(
        self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        if image.dtype == np.uint8:
            image = image.astype(np.float32)

        # take mean across the whole `image`
        mean = np.mean(image)
        std = np.std(image)
        adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))

        return normalize(image, mean=mean, std=adjusted_stddev, **kwargs)
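
    # In effect this is per-image standardization: x' = (x - mean(x)) divided
    # by max(std(x), 1/sqrt(N)) with N the number of elements, so a constant
    # image maps to zeros instead of dividing by zero. For a 224x224x3 image
    # the floor is 1/sqrt(150528) ~= 0.0026.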
    def preprocess(
        self,
        images: ImageInput,
        header_text: Optional[str] = None,
        do_convert_rgb: bool = None,
        do_normalize: Optional[bool] = None,
        max_patches: Optional[int] = None,
        patch_size: Optional[Dict[str, int]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> ImageInput:
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa

        if kwargs.get("data_format", None) is not None:
            raise ValueError("data_format is not an accepted input as the outputs are ")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if is_vqa:
            if header_text is None:
                raise ValueError("A header text must be provided for VQA models.")
            font_bytes = kwargs.pop("font_bytes", None)
            font_path = kwargs.pop("font_path", None)
            if isinstance(header_text, str):
                header_text = [header_text] * len(images)
            images = [
                render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
                for i, image in enumerate(images)
            ]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image, max_patches=max_patches, patch_size=patch_size)
            for image in images
        ]

        # create attention mask in numpy
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]

        encoded_outputs = BatchFeature(
            data={"flattened_patches": images, "attention_mask": attention_masks}, tensor_type=return_tensors
        )

        return encoded_outputs
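
# Hedged end-to-end sketch (the helper and variable names are ours): build the
# processor with default 16x16 patches, run one RGB image through
# `preprocess`, and get back `flattened_patches` of shape
# (1, max_patches, 2 + 768) plus a matching attention mask.
def _demo_preprocess():
    processor = Pix2StructImageProcessor(max_patches=1024)
    image = np.random.randint(0, 255, (224, 224, 3), dtype=np.uint8)
    encoded = processor.preprocess(image, return_tensors="np")
    assert encoded["flattened_patches"].shape == (1, 1024, 2 + 16 * 16 * 3)
    return encoded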
| 363
|
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp):
    # This defines a "chinese character" as anything in the CJK Unicode block:
    # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    #
    # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
    # despite its name. The modern Korean Hangul alphabet is a different block,
    # as is Japanese Hiragana and Katakana. Those alphabets are used to write
    # space-separated words, so they are not treated specially and handled
    # like all of the other languages.
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)  #
        or (cp >= 0x20000 and cp <= 0x2A6DF)  #
        or (cp >= 0x2A700 and cp <= 0x2B73F)  #
        or (cp >= 0x2B740 and cp <= 0x2B81F)  #
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
    ):  #
        return True

    return False
def is_chinese(word: str):
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1


def get_chinese_word(tokens: List[str]):
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set()):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match_len = min(end - start, max_word_len)
            for i in range(max_match_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
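
# Worked example (ours): with the LTP segmentation {"天气"} as the whole-word
# set, the BERT tokens ['天', '气', '好'] become ['天', '##气', '好'] -- the
# '##' marks subword positions so they can be masked together later.
def _demo_add_sub_symbol():
    tokens = add_sub_symbol(["天", "气", "好"], {"天气"})
    assert tokens == ["天", "##气", "好"]
    return tokens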
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []

    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)

    return ref_ids
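
# Illustration (ours): each entry of `ref_ids` lists the token positions whose
# '##'-prefixed Chinese subwords continue a whole word, which is what
# whole-word-masking data collators consume to mask entire words at once.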
def main(args):
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
'''--file_name''',
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''', type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path'''
)
parser.add_argument('''--bert''', type=str, default='''./resources/robert''', help='''resources for Bert tokenizer''')
parser.add_argument('''--save_path''', type=str, default='''./resources/ref.txt''', help='''path to save res''')
    args = parser.parse_args()
main(args)
| 314
| 0
|
"""simple docstring"""
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('''0.8.3'''):
raise Exception('''requires gluonnlp == 0.8.3''')
if version.parse(mx.__version__) != version.parse('''1.5.0'''):
raise Exception('''requires mxnet == 1.5.0''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "The Nymphenburg Palace is a beautiful palace in Munich!"
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str):
    bort_4_8_768_1024_hparams = {
"attention_cell": "multi_head",
"num_layers": 4,
"units": 1024,
"hidden_size": 768,
"max_length": 512,
"num_heads": 8,
"scaled": True,
"dropout": 0.1,
"use_residual": True,
"embed_size": 1024,
"embed_dropout": 0.1,
"word_embed": None,
"layer_norm_eps": 1E-5,
"token_type_vocab_size": 2,
}
    predefined_args = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"],
        num_layers=predefined_args["num_layers"],
        units=predefined_args["units"],
        hidden_size=predefined_args["hidden_size"],
        max_length=predefined_args["max_length"],
        num_heads=predefined_args["num_heads"],
        scaled=predefined_args["scaled"],
        dropout=predefined_args["dropout"],
        output_attention=False,
        output_all_encodings=False,
        use_residual=predefined_args["use_residual"],
        activation=predefined_args.get("activation", "gelu"),
        layer_norm_eps=predefined_args.get("layer_norm_eps", None),
    )
    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = "openwebtext_ccnews_stories_books_cased"

    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir(), "models")
    bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)

    original_bort = nlp.model.BERTModel(
        encoder,
        len(bort_vocab),
        units=predefined_args["units"],
        embed_size=predefined_args["embed_size"],
        embed_dropout=predefined_args["embed_dropout"],
        word_embed=predefined_args["word_embed"],
        use_pooler=False,
        use_token_type_embed=False,
        token_type_vocab_size=predefined_args["token_type_vocab_size"],
        use_classifier=False,
        use_decoder=False,
    )

    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()
    # Build our config 🤗
    hf_bort_config_json = {
        "architectures": ["BertForMaskedLM"],
        "attention_probs_dropout_prob": predefined_args["dropout"],
        "hidden_act": "gelu",
        "hidden_dropout_prob": predefined_args["dropout"],
        "hidden_size": predefined_args["embed_size"],
        "initializer_range": 0.02,
        "intermediate_size": predefined_args["hidden_size"],
        "layer_norm_eps": predefined_args["layer_norm_eps"],
        "max_position_embeddings": predefined_args["max_length"],
        "model_type": "bort",
        "num_attention_heads": predefined_args["num_heads"],
        "num_hidden_layers": predefined_args["num_layers"],
        "pad_token_id": 1,  # 2 = BERT, 1 = RoBERTa
        "type_vocab_size": 1,  # 2 = BERT, 1 = RoBERTa
        "vocab_size": len(bort_vocab),
    }

    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
    # Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))

    # Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape

        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape

        assert (
            shape_hf == shape_gluon
        ), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"

        return gluon_param
    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight"
    )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight"
    )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta"
    )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma"
    )

    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data
    )

    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]

        # self attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias"
        )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight"
        )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias"
        )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight"
        )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias"
        )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight"
        )

        # self attention output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias"
        )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight"
        )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta"
        )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma"
        )

        # intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias"
        )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight"
        )

        # output
        bert_output: BertOutput = layer.output

        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias"
        )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight"
        )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta"
        )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma"
        )
    # Save space and energy 🎄
    hf_bort_model.half()

    # Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)["input_ids"]

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors="pt")
    output_hf = hf_bort_model(**input_ids)[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)

    if success:
        print("✔️ Both model do output the same tensors")
    else:
        print("❌ Both model do **NOT** output the same tensors")
        print("Absolute difference is:", max_absolute_diff)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path the official Bort params file.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 364
|
"""simple docstring"""
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1

        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1

        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
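
# Worked example (ours): a 4x4 matrix with size=2, stride=2 pools each 2x2
# block to its maximum, giving a 2x2 output.
def _demo_maxpooling():
    arr = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]])
    result = maxpooling(arr, size=2, stride=2)
    assert (result == np.array([[6.0, 8.0], [14.0, 16.0]])).all()
    return result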
def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1

        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1

        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='''avgpooling''', verbose=True)
# Loading the image
    image = Image.open("path_to_image")
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 314
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}
class LxmertConfig( PretrainedConfig ):
    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
| 365
|
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline( DiffusionPipeline ):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
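
# Hedged usage sketch (checkpoint id "google/ddpm-cifar10-32" is illustrative):
# DDIM reuses DDPM-trained weights but samples in far fewer steps, and
# eta=0.0 makes the sampler deterministic for a fixed generator seed.
def _demo_ddim_sampling():
    pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
    generator = torch.Generator(device="cpu").manual_seed(0)
    return pipe(batch_size=1, generator=generator, eta=0.0, num_inference_steps=50).images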
| 314
| 0
|
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition( TaskTemplate ):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 366
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_plbart"] = [
'''PLBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PLBartForCausalLM''',
'''PLBartForConditionalGeneration''',
'''PLBartForSequenceClassification''',
'''PLBartModel''',
'''PLBartPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 314
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 367
|
"""simple docstring"""
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = '''
import os
'''
IMPORT_IN_FUNCTION = '''
def foo():
import os
return False
'''
DEEPLY_NESTED_IMPORT = '''
def foo():
def bar():
if True:
import os
return False
return bar()
'''
TOP_LEVEL_TRY_IMPORT = '''
import os
try:
import bar
except ImportError:
raise ValueError()
'''
TRY_IMPORT_IN_FUNCTION = '''
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
'''
MULTIPLE_EXCEPTS_IMPORT = '''
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
'''
EXCEPT_AS_IMPORT = '''
import os
try:
import bar
except ImportError as e:
raise ValueError()
'''
GENERIC_EXCEPT_IMPORT = '''
import os
try:
import bar
except:
raise ValueError()
'''
MULTILINE_TRY_IMPORT = '''
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
'''
MULTILINE_BOTH_IMPORT = '''
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
'''
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
| 314
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_mobilebert''': [
'''MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''MobileBertConfig''',
'''MobileBertOnnxConfig''',
],
'''tokenization_mobilebert''': ['''MobileBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mobilebert_fast"] = ["MobileBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilebert"] = [
'''MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileBertForMaskedLM''',
'''MobileBertForMultipleChoice''',
'''MobileBertForNextSentencePrediction''',
'''MobileBertForPreTraining''',
'''MobileBertForQuestionAnswering''',
'''MobileBertForSequenceClassification''',
'''MobileBertForTokenClassification''',
'''MobileBertLayer''',
'''MobileBertModel''',
'''MobileBertPreTrainedModel''',
'''load_tf_weights_in_mobilebert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
'''TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileBertForMaskedLM''',
'''TFMobileBertForMultipleChoice''',
'''TFMobileBertForNextSentencePrediction''',
'''TFMobileBertForPreTraining''',
'''TFMobileBertForQuestionAnswering''',
'''TFMobileBertForSequenceClassification''',
'''TFMobileBertForTokenClassification''',
'''TFMobileBertMainLayer''',
'''TFMobileBertModel''',
'''TFMobileBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 368
|
"""simple docstring"""
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp( tf.keras.optimizers.schedules.LearningRateSchedule ):
    def __init__(
        self,
        initial_learning_rate: float,
        decay_schedule_fn: Callable,
        warmup_steps: int,
        power: float = 1.0,
        name: str = None,
    ):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )

    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
def create_optimizer(
    init_lr: float,
    num_train_steps: int,
    num_warmup_steps: int,
    min_lr_ratio: float = 0.0,
    adam_beta1: float = 0.9,
    adam_beta2: float = 0.999,
    adam_epsilon: float = 1e-8,
    adam_clipnorm: Optional[float] = None,
    adam_global_clipnorm: Optional[float] = None,
    weight_decay_rate: float = 0.0,
    power: float = 1.0,
    include_in_weight_decay: Optional[List[str]] = None,
):
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr,
        decay_steps=num_train_steps - num_warmup_steps,
        end_learning_rate=init_lr * min_lr_ratio,
        power=power,
    )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr,
            decay_schedule_fn=lr_schedule,
            warmup_steps=num_warmup_steps,
        )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule,
            weight_decay_rate=weight_decay_rate,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
            exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
            include_in_weight_decay=include_in_weight_decay,
        )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
        )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
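
# Hedged usage sketch (hyper-parameter values are illustrative): warm up for
# the first 10% of training, decay polynomially to zero afterwards, with
# decoupled weight decay that skips LayerNorm and bias parameters.
def _demo_create_optimizer():
    optimizer, lr_schedule = create_optimizer(
        init_lr=5e-5,
        num_train_steps=10_000,
        num_warmup_steps=1_000,
        weight_decay_rate=0.01,
    )
    return optimizer, lr_schedule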
class AdamWeightDecay( Adam ):
    def __init__(
        self,
        learning_rate: Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001,
        beta_1: float = 0.9,
        beta_2: float = 0.999,
        epsilon: float = 1e-7,
        amsgrad: bool = False,
        weight_decay_rate: float = 0.0,
        include_in_weight_decay: Optional[List[str]] = None,
        exclude_from_weight_decay: Optional[List[str]] = None,
        name: str = "AdamWeightDecay",
        **kwargs,
    ):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config):
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate"
        )

    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"],
                use_locking=self._use_locking,
            )
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}

        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients

        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        if self.weight_decay_rate == 0:
            return False

        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True

        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True
class GradientAccumulator( object ):
    def __init__(self):
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64),
                trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )

        return self._accum_steps.value()

    @property
    def gradients(self):
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient),
                        trainable=False,
                        synchronization=tf.VariableSynchronization.ON_READ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ]
            )
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")

        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)

        self._accum_steps.assign_add(1)

    def reset(self):
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
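
# Hedged usage sketch (the toy gradient values are ours): accumulate gradients
# over several micro-batches, read out the sum, then reset before the next
# optimizer step.
def _demo_gradient_accumulation():
    accumulator = GradientAccumulator()
    for _ in range(4):  # four micro-batches
        accumulator([tf.constant([0.1, 0.1])])
    summed = accumulator.gradients  # approximately [0.4, 0.4] after four calls
    accumulator.reset()
    return summed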
| 314
| 0
|
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler( SchedulerMixin , ConfigMixin ):
    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device: Union[str, torch.device] = None):
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
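
# The update in `step_pred` is one Euler-Maruyama step of the reverse-time
# VP-SDE: dx = [-1/2 beta(t) x - beta(t) score(x, t)] dt + sqrt(beta(t)) dw,
# integrated backwards with dt = -1/len(timesteps); `x_mean` is the
# deterministic part and the sqrt(-dt)-scaled Gaussian noise realizes dw.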
| 369
|
"""simple docstring"""
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"


# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def _snake_case ( _snake_case : Dict , _snake_case : Optional[Any] ):
def find_duplicates(_snake_case : Tuple ):
return [k for k, v in collections.Counter(_snake_case ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
lowerCAmelCase : Any = []
for key in import_dict_objects.keys():
lowerCAmelCase : int = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(f'''Duplicate _import_structure definitions for: {duplicate_imports}''' )
lowerCAmelCase : Optional[Any] = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(f'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
lowerCAmelCase : Tuple = '''base imports''' if key == '''none''' else f'''{key} backend'''
errors.append(f'''Differences for {name}:''' )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(f''' {a} in TYPE_HINT but not in _import_structure.''' )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(f''' {a} in _import_structure but not in TYPE_HINT.''' )
return errors
def _snake_case ( ):
lowerCAmelCase : int = []
for root, _, files in os.walk(_snake_case ):
if "__init__.py" in files:
lowerCAmelCase : List[Any] = os.path.join(_snake_case , '''__init__.py''' )
lowerCAmelCase : List[Any] = parse_init(_snake_case )
if objects is not None:
lowerCAmelCase : Tuple = analyze_results(*_snake_case )
if len(_snake_case ) > 0:
lowerCAmelCase : int = f'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
failures.append('''\n'''.join(_snake_case ) )
if len(_snake_case ) > 0:
raise ValueError('''\n\n'''.join(_snake_case ) )
def _snake_case ( ):
lowerCAmelCase : Optional[Any] = []
for path, directories, files in os.walk(_snake_case ):
for folder in directories:
# Ignore private modules
if folder.startswith('''_''' ):
directories.remove(_snake_case )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(_snake_case ) / folder).glob('''*.py''' ) ) ) == 0:
continue
lowerCAmelCase : Dict = str((Path(_snake_case ) / folder).relative_to(_snake_case ) )
lowerCAmelCase : Optional[int] = short_path.replace(os.path.sep , '''.''' )
submodules.append(_snake_case )
for fname in files:
if fname == "__init__.py":
continue
lowerCAmelCase : Optional[Any] = str((Path(_snake_case ) / fname).relative_to(_snake_case ) )
lowerCAmelCase : Any = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' )
if len(submodule.split('''.''' ) ) == 1:
submodules.append(_snake_case )
return submodules
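# Minimal sketch (illustrative values; `_demo_*` names are hypothetical): how a
# file path becomes a dotted submodule name, mirroring the replace calls above.
import os
from pathlib import Path

_demo_short = str(Path('''models''') / '''bert''' / '''modeling_bert.py''')
_demo_module = _demo_short.replace('''.py''' , '''''').replace(os.path.sep , '''.''')
assert _demo_module == '''models.bert.modeling_bert'''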
snake_case__ : str = [
'''convert_pytorch_checkpoint_to_tf2''',
'''modeling_flax_pytorch_utils''',
]
def _snake_case ( ):
# This is to make sure the transformers module imported is the one in the repo.
lowerCAmelCase : Any = importlib.util.spec_from_file_location(
'''transformers''' , os.path.join(_snake_case , '''__init__.py''' ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
lowerCAmelCase : Any = spec.loader.load_module()
lowerCAmelCase : Optional[Any] = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(_snake_case ) > 0:
lowerCAmelCase : Dict = '''\n'''.join(f'''- {module}''' for module in module_not_registered )
raise ValueError(
'''The following submodules are not properly registered in the main init of Transformers:\n'''
f'''{list_of_modules}\n'''
'''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 314
| 0
|
"""simple docstring"""
def is_pangram( input_str : str = '''The quick brown fox jumps over the lazy dog''' , ):
    frequency = set()
    # Remove all the whitespace from our sentence
    input_str = input_str.replace(''' ''' , '''''' )
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower() )
    return len(frequency ) == 26


def is_pangram_faster( input_str : str = '''The quick brown fox jumps over the lazy dog''' , ):
    flags = [False] * 26
    for char in input_str:
        if char.islower():
            flags[ord(char ) - ord('''a''' )] = True
        elif char.isupper():
            flags[ord(char ) - ord('''A''' )] = True
    return all(flags )


def is_pangram_fastest( input_str : str = '''The quick brown fox jumps over the lazy dog''' , ):
    return len({char for char in input_str.lower() if char.isalpha()} ) == 26


def benchmark():
    from timeit import timeit

    setup = '''from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest'''
    print(timeit('''is_pangram()''' , setup=setup ) )
    print(timeit('''is_pangram_faster()''' , setup=setup ) )
    print(timeit('''is_pangram_fastest()''' , setup=setup ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 370
|
"""simple docstring"""
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def _snake_case ( _snake_case : Optional[int] ):
lowerCAmelCase : List[str] = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(_snake_case , _snake_case )
def _snake_case ( emb ):
    # build an lm_head linear layer tied to an embedding matrix
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def _snake_case ( _snake_case : Union[str, Any] , _snake_case : Dict=None ):
lowerCAmelCase : Union[str, Any] = {}
for old_key in state_dict.keys():
lowerCAmelCase : Union[str, Any] = old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
lowerCAmelCase : str = key.replace('''moe_layer.experts.0''' , f'''ffn.experts.expert_{expert_idx}''' )
else:
lowerCAmelCase : Optional[Any] = key.replace('''moe_layer.experts.''' , '''ffn.experts.expert_''' )
if "gate" in key:
lowerCAmelCase : Any = key.replace('''.moe_layer.gate.wg''' , '''.ffn.router.classifier''' )
if "fc2" and "experts" not in key:
lowerCAmelCase : Tuple = key.replace('''.fc2.''' , '''.ffn.fc2.''' )
if "fc1" and "experts" not in key:
lowerCAmelCase : int = key.replace('''.fc1.''' , '''.ffn.fc1.''' )
if ".encoder_attn." in key:
lowerCAmelCase : List[str] = key.replace('''.encoder_attn.''' , '''.cross_attention.''' )
if "encoder_attn_layer_norm" in key:
lowerCAmelCase : int = key.replace('''encoder_attn_layer_norm''' , '''cross_attention_layer_norm''' )
if "final_layer_norm" in key:
lowerCAmelCase : List[str] = key.replace('''final_layer_norm''' , '''ff_layer_norm''' )
lowerCAmelCase : Tuple = state_dict[old_key]
return new_dict
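# Sketch of the renaming above on a single (hypothetical) expert weight key:
# fairseq's `moe_layer.experts.0` prefix becomes `ffn.experts.expert_<idx>`.
_demo_old_key = '''decoder.layers.3.moe_layer.experts.0.fc1.weight'''
_demo_new_key = _demo_old_key.replace('''moe_layer.experts.0''' , '''ffn.experts.expert_7''')
assert _demo_new_key == '''decoder.layers.3.ffn.experts.expert_7.fc1.weight'''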
def _snake_case ( _snake_case : Optional[int] , _snake_case : Optional[int] , _snake_case : Optional[int] , _snake_case : Union[str, Any] , _snake_case : str = WEIGHTS_NAME ):
lowerCAmelCase : Optional[Any] = []
lowerCAmelCase : Tuple = 0
os.makedirs(_snake_case , exist_ok=_snake_case )
for expert in range(_snake_case ):
lowerCAmelCase : Any = switch_checkpoint_path + f'''-rank-{expert}.pt'''
if os.path.isfile(_snake_case ):
lowerCAmelCase : List[str] = torch.load(_snake_case )['''model''']
remove_ignore_keys_(_snake_case )
lowerCAmelCase : Any = rename_fairseq_keys(_snake_case , _snake_case )
lowerCAmelCase : Any = os.path.join(
_snake_case , weights_name.replace('''.bin''' , f'''-{len(_snake_case )+1:05d}-of-???.bin''' ) )
torch.save(_snake_case , _snake_case )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(_snake_case )[0]].dtype )
# Add the last block
lowerCAmelCase : List[str] = os.path.join(_snake_case , weights_name.replace('''.bin''' , f'''-{len(_snake_case )+1:05d}-of-???.bin''' ) )
lowerCAmelCase : str = torch.load(switch_checkpoint_path + '''-shared.pt''' )['''model''']
remove_ignore_keys_(_snake_case )
lowerCAmelCase : Union[str, Any] = rename_fairseq_keys(_snake_case , _snake_case )
lowerCAmelCase : Dict = shared_weights['''decoder.embed_tokens.weight''']
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved on the same file)
if len(_snake_case ) == 1:
lowerCAmelCase : List[str] = os.path.join(_snake_case , _snake_case )
torch.save(_snake_case , _snake_case )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(_snake_case , _snake_case )
# Otherwise, let's build the index
lowerCAmelCase : Dict = {}
for idx, shard in enumerate(_snake_case ):
lowerCAmelCase : Union[str, Any] = weights_name.replace('''.bin''' , f'''-{idx+1:05d}-of-{len(_snake_case ):05d}.bin''' )
lowerCAmelCase : Any = os.path.join(_snake_case , weights_name.replace('''.bin''' , f'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(_snake_case , os.path.join(_snake_case , _snake_case ) )
for key in shard:
lowerCAmelCase : List[Any] = shard_file
# Add the metadata
lowerCAmelCase : Dict = {'''total_size''': total_size}
lowerCAmelCase : int = {'''metadata''': metadata, '''weight_map''': weight_map}
with open(os.path.join(_snake_case , _snake_case ) , '''w''' , encoding='''utf-8''' ) as f:
lowerCAmelCase : Union[str, Any] = json.dumps(_snake_case , indent=2 , sort_keys=_snake_case ) + '''\n'''
f.write(_snake_case )
return metadata, index
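# Shape sketch (dummy values): the index written above pairs a metadata block
# holding the total byte size with a weight_map from parameter name to shard file.
import json

_demo_index = {
    '''metadata''': {'''total_size''': 1_2_3_4},
    '''weight_map''': {'''decoder.embed_tokens.weight''': '''pytorch_model-00001-of-00002.bin'''},
}
print(json.dumps(_demo_index , indent=2 , sort_keys=True))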
if __name__ == "__main__":
snake_case__ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--nllb_moe_checkpoint_path''',
default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000''',
type=str,
required=False,
help='''Path to a directory containing a folder per layer. Follows the original Google format.''',
)
parser.add_argument('''--dtype''', default='''float32''', type=str, required=False, help='''dtype of the saved model''')
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b''',
type=str,
required=False,
help='''Path to the output pytorch model.''',
)
snake_case__ : List[str] = parser.parse_args()
snake_case__ , snake_case__ : Tuple = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
snake_case__ : str = NllbMoeConfig.from_pretrained(
'''facebook/nllb-200-3.3B''', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
snake_case__ : Any = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print('''Done''')
model.save_pretrained(args.pytorch_dump_folder_path)
| 314
| 0
|
"""simple docstring"""
from typing import Any
class Node:
    def __init__( self , data : Any ):
        self.data = data
        self.next = None


class LinkedList:
    def __init__( self ):
        self.head = None

    def print_list( self ):
        temp = self.head
        while temp is not None:
            print(temp.data , end=''' ''' )
            temp = temp.next
        print()

    def push( self , new_data : Any ):
        new_node = Node(new_data )
        new_node.next = self.head
        self.head = new_node

    def swap_nodes( self , node_data_a : Any , node_data_b : Any ):
        if node_data_a == node_data_b:
            return
        node_a = self.head
        while node_a is not None and node_a.data != node_data_a:
            node_a = node_a.next
        node_b = self.head
        while node_b is not None and node_b.data != node_data_b:
            node_b = node_b.next
        if node_a is None or node_b is None:
            return
        # swap the payloads rather than relinking the nodes
        node_a.data , node_b.data = node_b.data , node_a.data
if __name__ == "__main__":
    ll = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print('''After swapping''')
ll.print_list()
| 371
|
"""simple docstring"""
from math import sqrt
def _snake_case ( _snake_case : int ):
assert isinstance(_snake_case , _snake_case ) and (
number >= 0
), "'number' must been an int and positive"
lowerCAmelCase : Dict = True
# 0 and 1 are none primes.
if number <= 1:
lowerCAmelCase : Optional[int] = False
for divisor in range(2 , int(round(sqrt(_snake_case ) ) ) + 1 ):
# if 'number' divisible by 'divisor' then sets 'status'
# of false and break up the loop.
if number % divisor == 0:
lowerCAmelCase : int = False
break
# precondition
assert isinstance(_snake_case , _snake_case ), "'status' must been from type bool"
return status
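# Quick self-contained check of the trial-division test above (same algorithm,
# hypothetical `_demo` name so it can run standalone):
def _demo_is_prime(n: int) -> bool:
    if n <= 1:
        return False
    return all(n % divisor != 0 for divisor in range(2 , int(round(sqrt(n ) ) ) + 1 ))

assert _demo_is_prime(29 ) and not _demo_is_prime(30 )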
def _snake_case ( _snake_case : List[str] ):
assert isinstance(_snake_case , _snake_case ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
lowerCAmelCase : Optional[int] = list(range(2 , n + 1 ) )
    lowerCAmelCase : Optional[Any] = []  # this list will be returned.
    # actual sieve of Eratosthenes
for i in range(len(_snake_case ) ):
for j in range(i + 1 , len(_snake_case ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
lowerCAmelCase : Any = 0
# filters actual prime numbers.
lowerCAmelCase : Any = [x for x in begin_list if x != 0]
# precondition
assert isinstance(_snake_case , _snake_case ), "'ans' must been from type list"
return ans
def _snake_case ( _snake_case : List[str] ):
assert isinstance(_snake_case , _snake_case ) and (n > 2), "'N' must been an int and > 2"
lowerCAmelCase : Tuple = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2 , n + 1 ):
if is_prime(_snake_case ):
ans.append(_snake_case )
# precondition
assert isinstance(_snake_case , _snake_case ), "'ans' must been from type list"
return ans
def _snake_case ( _snake_case : int ):
assert isinstance(_snake_case , _snake_case ) and number >= 0, "'number' must been an int and >= 0"
    lowerCAmelCase : Dict = []  # this list will be returned by the function.
# potential prime number factors.
lowerCAmelCase : Optional[int] = 2
lowerCAmelCase : List[str] = number
if number == 0 or number == 1:
ans.append(_snake_case )
# if 'number' not prime then builds the prime factorization of 'number'
elif not is_prime(_snake_case ):
while quotient != 1:
if is_prime(_snake_case ) and (quotient % factor == 0):
ans.append(_snake_case )
                quotient //= factor  # integer division keeps 'quotient' an int
else:
factor += 1
else:
ans.append(_snake_case )
# precondition
assert isinstance(_snake_case , _snake_case ), "'ans' must been from type list"
return ans
def _snake_case ( _snake_case : Tuple ):
assert isinstance(_snake_case , _snake_case ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowerCAmelCase : Optional[Any] = 0
# prime factorization of 'number'
lowerCAmelCase : Optional[Any] = prime_factorization(_snake_case )
lowerCAmelCase : Any = max(_snake_case )
# precondition
assert isinstance(_snake_case , _snake_case ), "'ans' must been from type int"
return ans
def _snake_case ( _snake_case : Dict ):
assert isinstance(_snake_case , _snake_case ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowerCAmelCase : int = 0
# prime factorization of 'number'
lowerCAmelCase : List[Any] = prime_factorization(_snake_case )
lowerCAmelCase : Optional[int] = min(_snake_case )
# precondition
assert isinstance(_snake_case , _snake_case ), "'ans' must been from type int"
return ans
def _snake_case ( _snake_case : Union[str, Any] ):
assert isinstance(_snake_case , _snake_case ), "'number' must been an int"
assert isinstance(number % 2 == 0 , _snake_case ), "compare bust been from type bool"
return number % 2 == 0
def _snake_case ( _snake_case : List[str] ):
assert isinstance(_snake_case , _snake_case ), "'number' must been an int"
assert isinstance(number % 2 != 0 , _snake_case ), "compare bust been from type bool"
return number % 2 != 0
def _snake_case ( _snake_case : Tuple ):
assert (
isinstance(_snake_case , _snake_case ) and (number > 2) and is_even(_snake_case )
), "'number' must been an int, even and > 2"
    lowerCAmelCase : List[str] = []  # this list will be returned
# creates a list of prime numbers between 2 up to 'number'
lowerCAmelCase : Union[str, Any] = get_prime_numbers(_snake_case )
lowerCAmelCase : Optional[Any] = len(_snake_case )
# run variable for while-loops.
lowerCAmelCase : List[str] = 0
lowerCAmelCase : Tuple = None
# exit variable. for break up the loops
lowerCAmelCase : str = True
while i < len_pn and loop:
lowerCAmelCase : str = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
lowerCAmelCase : Dict = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(_snake_case , _snake_case )
and (len(_snake_case ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
def _snake_case ( numbera : int , numberb : int ):
    assert (
        isinstance(numbera , int )
        and isinstance(numberb , int )
        and (numbera >= 0)
        and (numberb >= 0)
    ), "'number1' and 'number2' must been positive integer."
    rest = 0
    while numberb != 0:
        rest = numbera % numberb
        numbera = numberb
        numberb = rest
# precondition
assert isinstance(_snake_case , _snake_case ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
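# Worked example for the Euclidean loop above, gcd(48, 18):
# 48 % 18 = 12 -> 18 % 12 = 6 -> 12 % 6 = 0, so the answer is 6.
import math

assert math.gcd(48 , 18 ) == 6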
def _snake_case ( numbera : int , numberb : int ):
    assert (
        isinstance(numbera , int )
        and isinstance(numberb , int )
        and (numbera >= 1)
        and (numberb >= 1)
    ), "'number1' and 'number2' must been positive integer."
    lowerCAmelCase : Union[str, Any] = 1  # actual answer that will be returned.
    # for kgV (x,1)
    if numbera > 1 and numberb > 1:
        # builds the prime factorization of 'number1' and 'number2'
        lowerCAmelCase : List[str] = prime_factorization(numbera )
        lowerCAmelCase : Union[str, Any] = prime_factorization(numberb )
    elif numbera == 1 or numberb == 1:
        lowerCAmelCase : Union[str, Any] = []
        lowerCAmelCase : Optional[int] = []
        lowerCAmelCase : List[str] = max(numbera , numberb )
lowerCAmelCase : Dict = 0
lowerCAmelCase : int = 0
    lowerCAmelCase : Dict = []  # captured numbers in both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
lowerCAmelCase : List[str] = prime_fac_a.count(_snake_case )
lowerCAmelCase : Any = prime_fac_a.count(_snake_case )
for _ in range(max(_snake_case , _snake_case ) ):
ans *= n
else:
lowerCAmelCase : Union[str, Any] = prime_fac_a.count(_snake_case )
for _ in range(_snake_case ):
ans *= n
done.append(_snake_case )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
lowerCAmelCase : List[Any] = prime_fac_a.count(_snake_case )
for _ in range(_snake_case ):
ans *= n
done.append(_snake_case )
# precondition
assert isinstance(_snake_case , _snake_case ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
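# Sanity check for the prime-factorization lcm above, via the identity
# lcm(a, b) * gcd(a, b) == a * b (standard-library gcd used for comparison):
from math import gcd as _demo_gcd

assert (12 * 18) // _demo_gcd(12 , 18 ) == 36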
def _snake_case ( _snake_case : Any ):
assert isinstance(_snake_case , _snake_case ) and (n >= 0), "'number' must been a positive int"
lowerCAmelCase : Optional[int] = 0
lowerCAmelCase : Tuple = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(_snake_case ):
ans += 1
# precondition
assert isinstance(_snake_case , _snake_case ) and is_prime(
_snake_case ), "'ans' must been a prime number and from type int"
return ans
def _snake_case ( p_number_a : int , p_number_b : int ):
    assert (
        is_prime(p_number_a ) and is_prime(p_number_b ) and (p_number_a < p_number_b)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
    lowerCAmelCase : Optional[int] = p_number_a + 1  # jump to the next number
    lowerCAmelCase : str = []  # this list will be returned.
# if number is not prime then
# fetch the next prime number.
while not is_prime(_snake_case ):
number += 1
    while number < p_number_b:
ans.append(_snake_case )
number += 1
# fetch the next prime number.
while not is_prime(_snake_case ):
number += 1
# precondition
assert (
isinstance(_snake_case , _snake_case )
and ans[0] != p_number_a
        and ans[len(_snake_case ) - 1] != p_number_b
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
def _snake_case ( _snake_case : List[Any] ):
assert isinstance(_snake_case , _snake_case ) and (n >= 1), "'n' must been int and >= 1"
lowerCAmelCase : Optional[Any] = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(_snake_case )
# precondition
assert ans[0] == 1 and ans[len(_snake_case ) - 1] == n, "Error in function getDivisiors(...)"
return ans
def _snake_case ( _snake_case : Union[str, Any] ):
assert isinstance(_snake_case , _snake_case ) and (
number > 1
), "'number' must been an int and >= 1"
lowerCAmelCase : int = get_divisors(_snake_case )
# precondition
assert (
isinstance(_snake_case , _snake_case )
and (divisors[0] == 1)
and (divisors[len(_snake_case ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
def _snake_case ( _snake_case : List[str] , _snake_case : Optional[Any] ):
assert (
isinstance(_snake_case , _snake_case )
and isinstance(_snake_case , _snake_case )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
lowerCAmelCase : int = gcd(abs(_snake_case ) , abs(_snake_case ) )
# precondition
assert (
isinstance(_snake_case , _snake_case )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
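# Worked example for the reduction above: 12/18 has gcd 6 and reduces to (2, 3).
from math import gcd as _demo_frac_gcd

_demo_g = _demo_frac_gcd(12 , 18 )
assert (12 // _demo_g , 18 // _demo_g) == (2 , 3)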
def _snake_case ( _snake_case : Optional[int] ):
assert isinstance(_snake_case , _snake_case ) and (n >= 0), "'n' must been a int and >= 0"
    lowerCAmelCase : Optional[Any] = 1  # this will be returned.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def _snake_case ( _snake_case : Union[str, Any] ):
assert isinstance(_snake_case , _snake_case ) and (n >= 0), "'n' must been an int and >= 0"
lowerCAmelCase : Dict = 0
lowerCAmelCase : Dict = 1
    lowerCAmelCase : Tuple = 1  # this will be returned
for _ in range(n - 1 ):
lowerCAmelCase : int = ans
ans += fiba
lowerCAmelCase : Optional[Any] = tmp
return ans
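# The iterative loop above produces the Fibonacci numbers 1, 1, 2, 3, 5, 8, 13, ...
# A self-contained restatement (hypothetical `_demo` name):
def _demo_fib(n: int) -> int:
    fib_prev , ans = 0 , 1
    for _ in range(n - 1 ):
        fib_prev , ans = ans , ans + fib_prev
    return ans

assert [_demo_fib(i ) for i in range(1 , 8 )] == [1, 1, 2, 3, 5, 8, 13]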
| 314
| 0
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class snake_case_:
__UpperCamelCase = 42 # [batch_size x 3]
__UpperCamelCase = 42 # [batch_size x 3]
__UpperCamelCase = 42 # [batch_size x 3]
__UpperCamelCase = 42 # [batch_size x 3]
__UpperCamelCase = 42
__UpperCamelCase = 42
__UpperCamelCase = 42
__UpperCamelCase = 42
__UpperCamelCase = 42
def lowerCamelCase__ ( self : Any ):
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def lowerCamelCase__ ( self : str ):
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def lowerCamelCase__ ( self : Optional[Any] ):
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : int = torch.arange(self.height * self.width )
lowerCAmelCase : str = torch.stack(
[
pixel_indices % self.width,
torch.div(__lowercase , self.width , rounding_mode='''trunc''' ),
] , axis=1 , )
return coords
@property
def lowerCamelCase__ ( self : int ):
lowerCAmelCase : str = self.shape
lowerCAmelCase : Tuple = int(np.prod(__lowercase ) )
lowerCAmelCase : Any = self.get_image_coords()
lowerCAmelCase : str = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
lowerCAmelCase : Optional[Any] = self.get_camera_rays(__lowercase )
lowerCAmelCase : int = rays.view(__lowercase , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : torch.Tensor ):
lowerCAmelCase : Union[str, Any] = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
lowerCAmelCase : Union[str, Any] = coords.view(__lowercase , -1 , 2 )
lowerCAmelCase : Optional[int] = self.resolution()
lowerCAmelCase : Tuple = self.fov()
lowerCAmelCase : List[str] = (flat.float() / (res - 1)) * 2 - 1
lowerCAmelCase : Optional[int] = fracs * torch.tan(fov / 2 )
lowerCAmelCase : Tuple = fracs.view(__lowercase , -1 , 2 )
lowerCAmelCase : Tuple = (
self.z.view(__lowercase , 1 , 3 )
+ self.x.view(__lowercase , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(__lowercase , 1 , 3 ) * fracs[:, :, 1:]
)
lowerCAmelCase : str = directions / directions.norm(dim=-1 , keepdim=__lowercase )
lowerCAmelCase : Union[str, Any] = torch.stack(
[
torch.broadcast_to(self.origin.view(__lowercase , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(__lowercase , *__lowercase , 2 , 3 )
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : int , UpperCamelCase_ : int ):
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=__lowercase , height=__lowercase , x_fov=self.x_fov , y_fov=self.y_fov , )
def _snake_case ( _snake_case : Any ):
lowerCAmelCase : List[str] = []
lowerCAmelCase : Dict = []
lowerCAmelCase : Dict = []
lowerCAmelCase : Union[str, Any] = []
for theta in np.linspace(0 , 2 * np.pi , num=20 ):
lowerCAmelCase : Union[str, Any] = np.array([np.sin(__lowerCAmelCase ), np.cos(__lowerCAmelCase ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
lowerCAmelCase : Optional[Any] = -z * 4
lowerCAmelCase : Any = np.array([np.cos(__lowerCAmelCase ), -np.sin(__lowerCAmelCase ), 0.0] )
lowerCAmelCase : Union[str, Any] = np.cross(__lowerCAmelCase , __lowerCAmelCase )
origins.append(__lowerCAmelCase )
xs.append(__lowerCAmelCase )
ys.append(__lowerCAmelCase )
zs.append(__lowerCAmelCase )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(__lowerCAmelCase , axis=0 ) ).float() , x=torch.from_numpy(np.stack(__lowerCAmelCase , axis=0 ) ).float() , y=torch.from_numpy(np.stack(__lowerCAmelCase , axis=0 ) ).float() , z=torch.from_numpy(np.stack(__lowerCAmelCase , axis=0 ) ).float() , width=__lowerCAmelCase , height=__lowerCAmelCase , x_fov=0.7 , y_fov=0.7 , shape=(1, len(__lowerCAmelCase )) , )
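# Shape sketch (dummy tensor; layout inferred from the camera_rays view above):
# rays pair each pixel's origin with its unit direction as [batch, H * W, 2, 3].
_demo_rays = torch.zeros(1 , 64 * 64 , 2 , 3 )
_demo_origins , _demo_directions = _demo_rays.unbind(dim=2 )
assert _demo_origins.shape == (1 , 4096 , 3 ) and _demo_directions.shape == (1 , 4096 , 3 )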
| 350
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case__ : Any = logging.get_logger(__name__)
snake_case__ : Any = {
'''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class snake_case_( a__ ):
__UpperCamelCase = '''vit_msn'''
def __init__( self : Dict , UpperCamelCase_ : str=7_6_8 , UpperCamelCase_ : List[Any]=1_2 , UpperCamelCase_ : Optional[Any]=1_2 , UpperCamelCase_ : str=3_0_7_2 , UpperCamelCase_ : List[Any]="gelu" , UpperCamelCase_ : List[Any]=0.0 , UpperCamelCase_ : Any=0.0 , UpperCamelCase_ : List[str]=0.02 , UpperCamelCase_ : List[Any]=1E-06 , UpperCamelCase_ : Tuple=2_2_4 , UpperCamelCase_ : Union[str, Any]=1_6 , UpperCamelCase_ : List[Any]=3 , UpperCamelCase_ : Any=True , **UpperCamelCase_ : Union[str, Any] , ):
super().__init__(**UpperCamelCase_ )
lowerCAmelCase : Any = hidden_size
lowerCAmelCase : Tuple = num_hidden_layers
lowerCAmelCase : List[Any] = num_attention_heads
lowerCAmelCase : Any = intermediate_size
lowerCAmelCase : Dict = hidden_act
lowerCAmelCase : int = hidden_dropout_prob
lowerCAmelCase : List[str] = attention_probs_dropout_prob
lowerCAmelCase : Tuple = initializer_range
lowerCAmelCase : Union[str, Any] = layer_norm_eps
lowerCAmelCase : Tuple = image_size
lowerCAmelCase : List[str] = patch_size
lowerCAmelCase : int = num_channels
lowerCAmelCase : Optional[int] = qkv_bias
| 314
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
snake_case__ : Tuple = {
"""configuration_perceiver""": ["""PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PerceiverConfig""", """PerceiverOnnxConfig"""],
"""tokenization_perceiver""": ["""PerceiverTokenizer"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : Optional[Any] = ["""PerceiverFeatureExtractor"""]
snake_case__ : Dict = ["""PerceiverImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : List[str] = [
"""PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PerceiverForImageClassificationConvProcessing""",
"""PerceiverForImageClassificationFourier""",
"""PerceiverForImageClassificationLearned""",
"""PerceiverForMaskedLM""",
"""PerceiverForMultimodalAutoencoding""",
"""PerceiverForOpticalFlow""",
"""PerceiverForSequenceClassification""",
"""PerceiverLayer""",
"""PerceiverModel""",
"""PerceiverPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
snake_case__ : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 351
|
"""simple docstring"""
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
snake_case__ : Optional[Any] = logging.getLogger(__name__)
def _snake_case ( _snake_case : str ):
lowerCAmelCase : Tuple = git.Repo(search_parent_directories=_snake_case )
lowerCAmelCase : Optional[int] = {
'''repo_id''': str(_snake_case ),
'''repo_sha''': str(repo.head.object.hexsha ),
'''repo_branch''': str(repo.active_branch ),
}
with open(os.path.join(_snake_case , '''git_log.json''' ) , '''w''' ) as f:
json.dump(_snake_case , _snake_case , indent=4 )
def _snake_case ( _snake_case : Any ):
if params.n_gpu <= 0:
lowerCAmelCase : Dict = 0
lowerCAmelCase : Optional[int] = -1
lowerCAmelCase : Dict = True
lowerCAmelCase : int = False
return
assert torch.cuda.is_available()
logger.info('''Initializing GPUs''' )
if params.n_gpu > 1:
assert params.local_rank != -1
lowerCAmelCase : str = int(os.environ['''WORLD_SIZE'''] )
lowerCAmelCase : Optional[int] = int(os.environ['''N_GPU_NODE'''] )
lowerCAmelCase : int = int(os.environ['''RANK'''] )
# number of nodes / node ID
lowerCAmelCase : Dict = params.world_size // params.n_gpu_per_node
lowerCAmelCase : int = params.global_rank // params.n_gpu_per_node
lowerCAmelCase : str = True
assert params.n_nodes == int(os.environ['''N_NODES'''] )
assert params.node_id == int(os.environ['''NODE_RANK'''] )
# local job (single GPU)
else:
assert params.local_rank == -1
lowerCAmelCase : List[Any] = 1
lowerCAmelCase : List[Any] = 0
lowerCAmelCase : Optional[int] = 0
lowerCAmelCase : Any = 0
lowerCAmelCase : Any = 1
lowerCAmelCase : Any = 1
lowerCAmelCase : Dict = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
lowerCAmelCase : Tuple = params.node_id == 0 and params.local_rank == 0
lowerCAmelCase : List[Any] = params.n_nodes > 1
# summary
lowerCAmelCase : Optional[int] = f'''--- Global rank: {params.global_rank} - '''
logger.info(PREFIX + '''Number of nodes: %i''' % params.n_nodes )
logger.info(PREFIX + '''Node ID : %i''' % params.node_id )
logger.info(PREFIX + '''Local rank : %i''' % params.local_rank )
logger.info(PREFIX + '''World size : %i''' % params.world_size )
logger.info(PREFIX + '''GPUs per node : %i''' % params.n_gpu_per_node )
logger.info(PREFIX + '''Master : %s''' % str(params.is_master ) )
logger.info(PREFIX + '''Multi-node : %s''' % str(params.multi_node ) )
logger.info(PREFIX + '''Multi-GPU : %s''' % str(params.multi_gpu ) )
logger.info(PREFIX + '''Hostname : %s''' % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info('''Initializing PyTorch distributed''' )
torch.distributed.init_process_group(
init_method='''env://''' , backend='''nccl''' , )
def _snake_case ( _snake_case : Optional[int] ):
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
| 314
| 0
|
"""simple docstring"""
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
snake_case__ : List[Any] = logging.get_logger(__name__)
snake_case__ : Any = """▁"""
snake_case__ : List[str] = {"""vocab_file""": """vocab.txt""", """sentencepiece_model_ckpt""": """sentencepiece.bpe.model"""}
snake_case__ : Tuple = {
"""sentencepiece_model_file""": """sentencepiece.bpe.model""",
"""vocab_file""": """vocab.txt""",
}
snake_case__ : Union[str, Any] = {
"""vocab_file""": {
"""ernie-m-base""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt""",
"""ernie-m-large""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt""",
},
"""sentencepiece_model_file""": {
"""ernie-m-base""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model""",
"""ernie-m-large""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model""",
},
}
snake_case__ : Dict = {
"""ernie-m-base""": 514,
"""ernie-m-large""": 514,
}
snake_case__ : int = {
"""ernie-m-base""": {"""do_lower_case""": False},
"""ernie-m-large""": {"""do_lower_case""": False},
}
class snake_case_( lowerCAmelCase_ ):
__UpperCamelCase = ["input_ids"]
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_INIT_CONFIGURATION
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = RESOURCE_FILES_NAMES
def __init__( self : List[str] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Any=None , UpperCamelCase_ : str=False , UpperCamelCase_ : List[Any]="utf8" , UpperCamelCase_ : str="[UNK]" , UpperCamelCase_ : int="[SEP]" , UpperCamelCase_ : int="[PAD]" , UpperCamelCase_ : int="[CLS]" , UpperCamelCase_ : List[Any]="[MASK]" , UpperCamelCase_ : str = None , **UpperCamelCase_ : List[Any] , ):
lowerCAmelCase : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , vocab_file=__SCREAMING_SNAKE_CASE , encoding=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , )
lowerCAmelCase : str = do_lower_case
lowerCAmelCase : Tuple = sentencepiece_model_ckpt
lowerCAmelCase : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__SCREAMING_SNAKE_CASE )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
lowerCAmelCase : Optional[Any] = self.load_vocab(filepath=__SCREAMING_SNAKE_CASE )
else:
lowerCAmelCase : List[str] = {self.sp_model.id_to_piece(__SCREAMING_SNAKE_CASE ): id for id in range(self.sp_model.get_piece_size() )}
lowerCAmelCase : List[str] = {v: k for k, v in self.vocab.items()}
def lowerCamelCase__ ( self : Tuple , UpperCamelCase_ : Tuple ):
if text is None:
return None
lowerCAmelCase : Union[str, Any] = self.tokenize(__SCREAMING_SNAKE_CASE )
lowerCAmelCase, lowerCAmelCase : Optional[int] = '''''', []
for i, ch in enumerate(__SCREAMING_SNAKE_CASE ):
if ch in self.SP_CHAR_MAPPING:
lowerCAmelCase : Union[str, Any] = self.SP_CHAR_MAPPING.get(__SCREAMING_SNAKE_CASE )
else:
lowerCAmelCase : Optional[Any] = unicodedata.normalize('''NFKC''' , __SCREAMING_SNAKE_CASE )
if self.is_whitespace(__SCREAMING_SNAKE_CASE ):
continue
normalized_text += ch
char_mapping.extend([i] * len(__SCREAMING_SNAKE_CASE ) )
lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : Dict = normalized_text, [], 0
if self.do_lower_case:
lowerCAmelCase : Optional[Any] = text.lower()
for token in split_tokens:
if token[:1] == "▁":
lowerCAmelCase : Optional[int] = token[1:]
lowerCAmelCase : List[Any] = text[offset:].index(__SCREAMING_SNAKE_CASE ) + offset
lowerCAmelCase : Optional[Any] = start + len(__SCREAMING_SNAKE_CASE )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
lowerCAmelCase : Optional[int] = end
return token_mapping
@property
def lowerCamelCase__ ( self : Any ):
return len(self.vocab )
def lowerCamelCase__ ( self : Union[str, Any] ):
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self : Optional[int] ):
lowerCAmelCase : Dict = self.__dict__.copy()
lowerCAmelCase : Tuple = None
return state
def __setstate__( self : int , UpperCamelCase_ : List[str] ):
lowerCAmelCase : int = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowerCAmelCase : Dict = {}
lowerCAmelCase : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : Dict ):
return "".join((self.SP_CHAR_MAPPING.get(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for c in text) )
def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Dict=False , UpperCamelCase_ : int=6_4 , UpperCamelCase_ : int=0.1 ):
if self.sp_model_kwargs.get('''enable_sampling''' ) is True:
lowerCAmelCase : Optional[int] = True
if self.sp_model_kwargs.get('''alpha''' ) is not None:
lowerCAmelCase : Union[str, Any] = self.sp_model_kwargs.get('''alpha''' )
if self.sp_model_kwargs.get('''nbest_size''' ) is not None:
lowerCAmelCase : Union[str, Any] = self.sp_model_kwargs.get('''nbest_size''' )
if not enable_sampling:
lowerCAmelCase : int = self.sp_model.EncodeAsPieces(__SCREAMING_SNAKE_CASE )
else:
lowerCAmelCase : int = self.sp_model.SampleEncodeAsPieces(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowerCAmelCase : Tuple = []
for pi, piece in enumerate(__SCREAMING_SNAKE_CASE ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(__SCREAMING_SNAKE_CASE ) and pi != 0:
new_pieces.append(__SCREAMING_SNAKE_CASE )
continue
else:
continue
lowerCAmelCase : Optional[int] = 0
for i, chunk in enumerate(__SCREAMING_SNAKE_CASE ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(__SCREAMING_SNAKE_CASE ) or self.is_punct(__SCREAMING_SNAKE_CASE ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(__SCREAMING_SNAKE_CASE )
lowerCAmelCase : Optional[Any] = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowerCAmelCase : List[Any] = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowerCAmelCase : Dict = i
if len(__SCREAMING_SNAKE_CASE ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def lowerCamelCase__ ( self : int , UpperCamelCase_ : int ):
lowerCAmelCase : Optional[Any] = ''''''.join(__SCREAMING_SNAKE_CASE ).replace(__SCREAMING_SNAKE_CASE , ''' ''' ).strip()
return out_string
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : Any ):
lowerCAmelCase : str = self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE )
lowerCAmelCase : Optional[Any] = ''''''.join(__SCREAMING_SNAKE_CASE ).replace(__SCREAMING_SNAKE_CASE , ''' ''' ).strip()
return out_string
def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : List[Any] ):
return self.vocab.get(__SCREAMING_SNAKE_CASE , self.vocab.get(self.unk_token ) )
def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : Any ):
return self.reverse_vocab.get(__SCREAMING_SNAKE_CASE , self.unk_token )
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Union[str, Any]=None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase : List[str] = [self.cls_token_id]
lowerCAmelCase : List[Any] = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def lowerCamelCase__ ( self : Tuple , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[Any]=None ):
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : str=False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : int = None ):
if token_ids_a is None:
# [CLS] X [SEP]
return (len(__SCREAMING_SNAKE_CASE ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(__SCREAMING_SNAKE_CASE ) + 1) + [1] * (len(__SCREAMING_SNAKE_CASE ) + 3)
def lowerCamelCase__ ( self : int , UpperCamelCase_ : Optional[Any] ):
if "\u4e00" <= char <= "\u9fff":
return True
return False
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : str ):
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : int ):
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : str ):
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(__SCREAMING_SNAKE_CASE ) == 1:
lowerCAmelCase : Any = unicodedata.category(__SCREAMING_SNAKE_CASE )
if cat == "Zs":
return True
return False
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : List[Any] ):
lowerCAmelCase : Any = {}
with io.open(__SCREAMING_SNAKE_CASE , '''r''' , encoding='''utf-8''' ) as f:
for index, line in enumerate(__SCREAMING_SNAKE_CASE ):
lowerCAmelCase : int = line.rstrip('''\n''' )
lowerCAmelCase : int = int(__SCREAMING_SNAKE_CASE )
return token_to_idx
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : str = None ):
lowerCAmelCase : int = 0
if os.path.isdir(__SCREAMING_SNAKE_CASE ):
lowerCAmelCase : List[Any] = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
else:
lowerCAmelCase : Any = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
with open(__SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as writer:
            for token, token_index in sorted(self.vocab.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
''' Please check that the vocabulary is not corrupted!''' )
lowerCAmelCase : Optional[int] = token_index
writer.write(token + '''\n''' )
index += 1
lowerCAmelCase : Tuple = os.path.join(__SCREAMING_SNAKE_CASE , '''sentencepiece.bpe.model''' )
with open(__SCREAMING_SNAKE_CASE , '''wb''' ) as fi:
lowerCAmelCase : List[Any] = self.sp_model.serialized_model_proto()
fi.write(__SCREAMING_SNAKE_CASE )
return (vocab_file,)
| 352
|
"""simple docstring"""
def sylvester( number : int ):
    assert isinstance(number , int ), f'''The input value of [n={number}] is not an integer'''
    if number == 1:
        return 2
    elif number < 1:
        raise ValueError(f'''The input value of [n={number}] has to be > 0''' )
    else:
        num = sylvester(number - 1 )
        lower = num - 1
        upper = num
        return lower * upper + 1
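# The recurrence above expands to s(n) = s(n - 1)**2 - s(n - 1) + 1, i.e. the
# Sylvester sequence 2, 3, 7, 43, 1807, ... A self-contained check:
def _demo_sylvester(n: int) -> int:
    s = 2
    for _ in range(n - 1 ):
        s = s * s - s + 1
    return s

assert [_demo_sylvester(i ) for i in range(1 , 5 )] == [2, 3, 7, 43]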
if __name__ == "__main__":
print(f"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
| 314
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
snake_case__ : str = None
snake_case__ : Optional[int] = logging.get_logger(__name__)
snake_case__ : List[str] = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
snake_case__ : List[Any] = {
'''vocab_file''': {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model''',
},
'''tokenizer_file''': {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/tokenizer.json''',
},
}
snake_case__ : Union[str, Any] = {
'''camembert-base''': 512,
}
snake_case__ : str = '''▁'''
class snake_case_( snake_case_ ):
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = ['''input_ids''', '''attention_mask''']
__UpperCamelCase = CamembertTokenizer
def __init__( self : Optional[Any] , UpperCamelCase_ : Dict=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : Optional[int]="<s>" , UpperCamelCase_ : List[Any]="</s>" , UpperCamelCase_ : Any="</s>" , UpperCamelCase_ : Tuple="<s>" , UpperCamelCase_ : Union[str, Any]="<unk>" , UpperCamelCase_ : Optional[Any]="<pad>" , UpperCamelCase_ : List[str]="<mask>" , UpperCamelCase_ : Union[str, Any]=["<s>NOTUSED", "</s>NOTUSED"] , **UpperCamelCase_ : str , ):
# Mask token behave like a normal word, i.e. include the space before it
lowerCAmelCase : str = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token
super().__init__(
UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , **UpperCamelCase_ , )
lowerCAmelCase : int = vocab_file
lowerCAmelCase : List[str] = False if not self.vocab_file else True
def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[Any] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase : Tuple = [self.cls_token_id]
lowerCAmelCase : Optional[int] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : List[Any] = None ):
lowerCAmelCase : Dict = [self.sep_token_id]
lowerCAmelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase__ ( self : str , UpperCamelCase_ : Any , UpperCamelCase_ : List[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(UpperCamelCase_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCAmelCase : Optional[Any] = os.path.join(
UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ):
copyfile(self.vocab_file , UpperCamelCase_ )
return (out_vocab_file,)
| 353
|
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def _snake_case ( _snake_case : List[str] ):
lowerCAmelCase : Union[str, Any] = SwinConfig(image_size=192 )
if "base" in model_name:
lowerCAmelCase : Union[str, Any] = 6
lowerCAmelCase : Any = 128
lowerCAmelCase : List[Any] = (2, 2, 18, 2)
lowerCAmelCase : Any = (4, 8, 16, 32)
elif "large" in model_name:
lowerCAmelCase : Tuple = 12
lowerCAmelCase : Dict = 192
lowerCAmelCase : List[str] = (2, 2, 18, 2)
lowerCAmelCase : Union[str, Any] = (6, 12, 24, 48)
else:
raise ValueError('''Model not supported, only supports base and large variants''' )
lowerCAmelCase : Optional[int] = window_size
lowerCAmelCase : Any = embed_dim
lowerCAmelCase : Optional[Any] = depths
lowerCAmelCase : int = num_heads
return config
def _snake_case ( _snake_case : Union[str, Any] ):
if "encoder.mask_token" in name:
lowerCAmelCase : Dict = name.replace('''encoder.mask_token''' , '''embeddings.mask_token''' )
if "encoder.patch_embed.proj" in name:
lowerCAmelCase : Union[str, Any] = name.replace('''encoder.patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "encoder.patch_embed.norm" in name:
lowerCAmelCase : Optional[Any] = name.replace('''encoder.patch_embed.norm''' , '''embeddings.norm''' )
if "attn.proj" in name:
lowerCAmelCase : Optional[Any] = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
lowerCAmelCase : List[str] = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
lowerCAmelCase : List[str] = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
lowerCAmelCase : Optional[int] = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
lowerCAmelCase : int = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
lowerCAmelCase : Optional[int] = name.replace('''mlp.fc2''' , '''output.dense''' )
if name == "encoder.norm.weight":
lowerCAmelCase : Tuple = '''layernorm.weight'''
if name == "encoder.norm.bias":
lowerCAmelCase : str = '''layernorm.bias'''
if "decoder" in name:
pass
else:
lowerCAmelCase : Optional[Any] = '''swin.''' + name
return name
def _snake_case ( _snake_case : Optional[Any] , _snake_case : Optional[int] ):
for key in orig_state_dict.copy().keys():
lowerCAmelCase : Optional[Any] = orig_state_dict.pop(_snake_case )
if "attn_mask" in key:
pass
elif "qkv" in key:
lowerCAmelCase : List[Any] = key.split('''.''' )
lowerCAmelCase : Dict = int(key_split[2] )
lowerCAmelCase : Optional[Any] = int(key_split[4] )
lowerCAmelCase : List[str] = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
lowerCAmelCase : Dict = val[:dim, :]
lowerCAmelCase : Dict = val[
dim : dim * 2, :
]
lowerCAmelCase : int = val[-dim:, :]
else:
lowerCAmelCase : str = val[
:dim
]
lowerCAmelCase : List[str] = val[
dim : dim * 2
]
lowerCAmelCase : Optional[Any] = val[
-dim:
]
else:
lowerCAmelCase : str = val
return orig_state_dict
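# Sketch of the fused-QKV split above: a (3 * dim, dim) weight matrix is cut
# into equal query / key / value blocks of dim rows each (dummy tensor).
_demo_dim = 4
_demo_qkv = torch.arange(3 * _demo_dim * _demo_dim ).reshape(3 * _demo_dim , _demo_dim )
_demo_q = _demo_qkv[:_demo_dim]
_demo_k = _demo_qkv[_demo_dim : _demo_dim * 2]
_demo_v = _demo_qkv[-_demo_dim:]
assert _demo_q.shape == _demo_k.shape == _demo_v.shape == (_demo_dim , _demo_dim)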
def convert_swin_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub):
    state_dict = torch.load(checkpoint_path , map_location='''cpu''' )['''model''']
    config = get_swin_config(model_name )
    model = SwinForMaskedImageModeling(config )
    model.eval()

    new_state_dict = convert_state_dict(state_dict , model )
    model.load_state_dict(new_state_dict )

    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    image_processor = ViTImageProcessor(size={'''height''': 192, '''width''': 192} )
    image = Image.open(requests.get(url , stream=True ).raw )
    inputs = image_processor(images=image , return_tensors='''pt''' )

    with torch.no_grad():
        outputs = model(**inputs )
    print(outputs.keys() )
    print('''Looks ok!''' )

    if pytorch_dump_folder_path is not None:
        print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path )
        print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
        image_processor.save_pretrained(pytorch_dump_folder_path )

    if push_to_hub:
        print(f'''Pushing model and image processor for {model_name} to hub''' )
        model.push_to_hub(f'''microsoft/{model_name}''' )
        image_processor.push_to_hub(f'''microsoft/{model_name}''' )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''swin-base-simmim-window6-192''',
type=str,
choices=['''swin-base-simmim-window6-192''', '''swin-large-simmim-window12-192'''],
help='''Name of the Swin SimMIM model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''',
default='''/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth''',
type=str,
help='''Path to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
args = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
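# Example invocation (a sketch; the script filename and checkpoint path are illustrative,
# not something shipped with this file):
#
#   python convert_swin_simmim_to_pytorch.py \
#       --model_name swin-base-simmim-window6-192 \
#       --checkpoint_path /path/to/simmim_pretrain__swin_base__img192_window6__100ep.pth \
#       --pytorch_dump_folder_path ./swin-base-simmim-window6-192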
"""simple docstring"""
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
    url = f'''https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'''
    return requests.get(url ).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    url = '''https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty'''
    story_ids = requests.get(url ).json()[:max_stories]
    return [get_hackernews_story(story_id ) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories )
    return "\n".join('''* [{title}]({url})'''.format(**story ) for story in stories )
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
"""simple docstring"""
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNet2DModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def _preprocess_image(image: Union[List, PIL.Image.Image, torch.Tensor] ):
    warnings.warn(
        '''The preprocess method is deprecated and will be removed in a future version. Please'''
        ''' use VaeImageProcessor.preprocess instead''' , FutureWarning , )
    if isinstance(image , torch.Tensor ):
        return image
    elif isinstance(image , PIL.Image.Image ):
        image = [image]
    if isinstance(image[0] , PIL.Image.Image ):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8
        image = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image]
        image = np.concatenate(image , axis=0 )
        image = np.array(image ).astype(np.float32 ) / 255.0
        image = image.transpose(0 , 3 , 1 , 2 )
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image )
    elif isinstance(image[0] , torch.Tensor ):
        image = torch.cat(image , dim=0 )
    return image
def _preprocess_mask(mask: Union[List, PIL.Image.Image, torch.Tensor] ):
    if isinstance(mask , torch.Tensor ):
        return mask
    elif isinstance(mask , PIL.Image.Image ):
        mask = [mask]
    if isinstance(mask[0] , PIL.Image.Image ):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert('''L''' ).resize((w, h) , resample=PIL_INTERPOLATION['''nearest'''] ) )[None, :] for m in mask]
        mask = np.concatenate(mask , axis=0 )
        mask = mask.astype(np.float32 ) / 255.0
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask )
    elif isinstance(mask[0] , torch.Tensor ):
        mask = torch.cat(mask , dim=0 )
    return mask
class RePaintPipeline(DiffusionPipeline ):
    unet: UNet2DModel
    scheduler: RePaintScheduler

    def __init__(self , unet , scheduler ):
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
@torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image],
        mask_image: Union[torch.Tensor, PIL.Image.Image],
        num_inference_steps: int = 250,
        eta: float = 0.0,
        jump_length: int = 10,
        jump_n_sample: int = 10,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        original_image = image
        original_image = _preprocess_image(original_image )
        original_image = original_image.to(device=self.device , dtype=self.unet.dtype )
        mask_image = _preprocess_mask(mask_image )
        mask_image = mask_image.to(device=self.device , dtype=self.unet.dtype )

        batch_size = original_image.shape[0]

        # sample gaussian noise to begin the loop
        if isinstance(generator , list ) and len(generator ) != batch_size:
            raise ValueError(
                F'''You have passed a list of generators of length {len(generator )}, but requested an effective batch'''
                F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )

        image_shape = original_image.shape
        image = randn_tensor(image_shape , generator=generator , device=self.device , dtype=self.unet.dtype )

        # set step values
        self.scheduler.set_timesteps(num_inference_steps , jump_length , jump_n_sample , self.device )
        self.scheduler.eta = eta

        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator , list ) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image , t ).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output , t , image , original_image , mask_image , generator ).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image , t_last , generator )
            t_last = t

        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image )
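# Minimal usage sketch (the checkpoint name is illustrative; any RePaint-compatible DDPM
# checkpoint works, and `init_image` / `mask` are assumed to be prepared by the caller):
#
#   from diffusers import RePaintPipeline, RePaintScheduler
#
#   scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256")
#   pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=scheduler)
#   output = pipe(image=init_image, mask_image=mask, num_inference_steps=250)
#   output.images[0].save("inpainted.png")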
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class snake_case_( lowerCAmelCase__ ):
def __init__( self : List[Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Optional[Any]=1_3 , UpperCamelCase_ : Any=7 , UpperCamelCase_ : List[Any]=True , UpperCamelCase_ : Any=True , UpperCamelCase_ : Any=False , UpperCamelCase_ : Optional[Any]=True , UpperCamelCase_ : Tuple=9_9 , UpperCamelCase_ : List[Any]=3_2 , UpperCamelCase_ : List[str]=5 , UpperCamelCase_ : List[str]=4 , UpperCamelCase_ : List[Any]=3_7 , UpperCamelCase_ : Tuple="gelu" , UpperCamelCase_ : List[Any]=0.1 , UpperCamelCase_ : List[Any]=0.1 , UpperCamelCase_ : List[Any]=5_1_2 , UpperCamelCase_ : Union[str, Any]=1_6 , UpperCamelCase_ : Dict=2 , UpperCamelCase_ : str=0.02 , UpperCamelCase_ : Optional[int]=3 , UpperCamelCase_ : Optional[Any]=4 , UpperCamelCase_ : Optional[Any]=None , ):
lowerCAmelCase : int = parent
lowerCAmelCase : Optional[Any] = batch_size
lowerCAmelCase : int = seq_length
lowerCAmelCase : Dict = is_training
lowerCAmelCase : Optional[int] = use_input_mask
lowerCAmelCase : Dict = use_token_type_ids
lowerCAmelCase : Tuple = use_labels
lowerCAmelCase : Tuple = vocab_size
lowerCAmelCase : Optional[int] = hidden_size
lowerCAmelCase : Optional[Any] = num_hidden_layers
lowerCAmelCase : List[Any] = num_attention_heads
lowerCAmelCase : List[str] = intermediate_size
lowerCAmelCase : Tuple = hidden_act
lowerCAmelCase : List[Any] = hidden_dropout_prob
lowerCAmelCase : str = attention_probs_dropout_prob
lowerCAmelCase : Dict = max_position_embeddings
lowerCAmelCase : Optional[Any] = type_vocab_size
lowerCAmelCase : Tuple = type_sequence_label_size
lowerCAmelCase : int = initializer_range
lowerCAmelCase : Tuple = num_labels
lowerCAmelCase : int = num_choices
lowerCAmelCase : List[Any] = scope
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase : Tuple = None
if self.use_input_mask:
lowerCAmelCase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase : Tuple = None
lowerCAmelCase : Dict = None
lowerCAmelCase : str = None
if self.use_labels:
lowerCAmelCase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase : str = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase : Optional[Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase__ ( self : str ):
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def lowerCamelCase__ ( self : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : Tuple , UpperCamelCase_ : Any , UpperCamelCase_ : Any , UpperCamelCase_ : str , UpperCamelCase_ : List[Any] ):
lowerCAmelCase : Any = DistilBertModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
lowerCAmelCase : str = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCAmelCase : Optional[int] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Any ):
lowerCAmelCase : Any = DistilBertForMaskedLM(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
lowerCAmelCase : int = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : int ):
lowerCAmelCase : List[str] = DistilBertForQuestionAnswering(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
lowerCAmelCase : List[Any] = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Dict , UpperCamelCase_ : int , UpperCamelCase_ : Any ):
lowerCAmelCase : Dict = self.num_labels
lowerCAmelCase : Optional[int] = DistilBertForSequenceClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
lowerCAmelCase : Union[str, Any] = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase__ ( self : Tuple , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : str , UpperCamelCase_ : str , UpperCamelCase_ : str , UpperCamelCase_ : Dict ):
lowerCAmelCase : int = self.num_labels
lowerCAmelCase : Tuple = DistilBertForTokenClassification(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
lowerCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Tuple , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Dict , UpperCamelCase_ : str , UpperCamelCase_ : List[Any] ):
lowerCAmelCase : List[str] = self.num_choices
lowerCAmelCase : int = DistilBertForMultipleChoice(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
lowerCAmelCase : List[str] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase : Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase : int = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase : Tuple = self.prepare_config_and_inputs()
((lowerCAmelCase), (lowerCAmelCase), (lowerCAmelCase), (lowerCAmelCase), (lowerCAmelCase), (lowerCAmelCase)) : Optional[int] = config_and_inputs
lowerCAmelCase : int = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class snake_case_( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
__UpperCamelCase = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
__UpperCamelCase = (
{
"feature-extraction": DistilBertModel,
"fill-mask": DistilBertForMaskedLM,
"question-answering": DistilBertForQuestionAnswering,
"text-classification": DistilBertForSequenceClassification,
"token-classification": DistilBertForTokenClassification,
"zero-shot": DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = True
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase : List[str] = DistilBertModelTester(self )
lowerCAmelCase : Optional[int] = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , dim=3_7 )
def lowerCamelCase__ ( self : List[str] ):
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self : Optional[Any] ):
lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*_SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*_SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self : Optional[int] ):
lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*_SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self : List[str] ):
lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*_SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*_SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*_SCREAMING_SNAKE_CASE )
@slow
def lowerCamelCase__ ( self : Optional[int] ):
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase : int = DistilBertModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
@slow
@require_torch_gpu
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase, lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
lowerCAmelCase : str = True
lowerCAmelCase : Optional[Any] = model_class(config=_SCREAMING_SNAKE_CASE )
lowerCAmelCase : List[Any] = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCAmelCase : Optional[int] = torch.jit.trace(
_SCREAMING_SNAKE_CASE , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(_SCREAMING_SNAKE_CASE , os.path.join(_SCREAMING_SNAKE_CASE , '''traced_model.pt''' ) )
lowerCAmelCase : List[str] = torch.jit.load(os.path.join(_SCREAMING_SNAKE_CASE , '''traced_model.pt''' ) , map_location=_SCREAMING_SNAKE_CASE )
loaded(inputs_dict['''input_ids'''].to(_SCREAMING_SNAKE_CASE ) , inputs_dict['''attention_mask'''].to(_SCREAMING_SNAKE_CASE ) )
@require_torch
class DistilBertModelIntegrationTest(unittest.TestCase ):
    @slow
    def test_inference_no_head_absolute_embedding(self ):
        model = DistilBertModel.from_pretrained('''distilbert-base-uncased''' )
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 11, 768) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 ) )
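# Note: the expected slice pins a 3x3 window of the hidden states captured from a reference run;
# atol=1E-4 absorbs fp32 rounding differences across hardware.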
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class StreamerTester(unittest.TestCase ):
    def test_text_streamer_matches_non_streaming(self ):
        tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        model = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(torch_device )
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(torch_device )
        greedy_ids = model.generate(input_ids , max_new_tokens=10 , do_sample=False )
        greedy_text = tokenizer.decode(greedy_ids[0] )

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer )
            model.generate(input_ids , max_new_tokens=10 , do_sample=False , streamer=streamer )
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text , greedy_text )

    def test_iterator_streamer_matches_non_streaming(self ):
        tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        model = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(torch_device )
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(torch_device )
        greedy_ids = model.generate(input_ids , max_new_tokens=10 , do_sample=False )
        greedy_text = tokenizer.decode(greedy_ids[0] )

        streamer = TextIteratorStreamer(tokenizer )
        generation_kwargs = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
        thread = Thread(target=model.generate , kwargs=generation_kwargs )
        thread.start()
        streamer_text = ''''''
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text , greedy_text )

    def test_text_streamer_skip_prompt(self ):
        tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        model = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(torch_device )
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(torch_device )
        greedy_ids = model.generate(input_ids , max_new_tokens=10 , do_sample=False )
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0] )

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer , skip_prompt=True )
            model.generate(input_ids , max_new_tokens=10 , do_sample=False , streamer=streamer )
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text , new_greedy_text )

    def test_text_streamer_decode_kwargs(self ):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
        # with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained('''distilgpt2''' )
        model = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(torch_device )
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5) , device=torch_device ).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer , skip_special_tokens=True )
            model.generate(input_ids , max_new_tokens=1 , do_sample=False , streamer=streamer )

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text , return_tensors='''pt''' )
        self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )

    def test_iterator_streamer_timeout(self ):
        tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        model = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(torch_device )
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(torch_device )
        streamer = TextIteratorStreamer(tokenizer , timeout=0.001 )
        generation_kwargs = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
        thread = Thread(target=model.generate , kwargs=generation_kwargs )
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty ):
            streamer_text = ''''''
            for new_text in streamer:
                streamer_text += new_text
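# Quick-reference sketch of the two streaming styles exercised above (any causal LM works;
# the names below mirror the tests, not additional library API):
#
#   streamer = TextStreamer(tokenizer)                # prints tokens to stdout as they arrive
#   model.generate(input_ids, streamer=streamer)
#
#   streamer = TextIteratorStreamer(tokenizer)        # lets the caller iterate over text chunks
#   Thread(target=model.generate, kwargs={"input_ids": input_ids, "streamer": streamer}).start()
#   text = "".join(streamer)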
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_time_series_transformer''': [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TimeSeriesTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_time_series_transformer'''] = [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimeSeriesTransformerForPrediction''',
'''TimeSeriesTransformerModel''',
'''TimeSeriesTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
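# Usage note (sketch): with the lazy module installed in sys.modules, an import such as
#   from transformers.models.time_series_transformer import TimeSeriesTransformerModel
# resolves the name lazily, so the heavy torch-backed module is only loaded on first access.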
"""simple docstring"""
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
torch.backends.cuda.matmul.allow_tf32 = False
class TrainingTests(unittest.TestCase ):
    def get_model_optimizer(self , resolution=32 ):
        set_seed(0 )
        model = UNet2DModel(sample_size=resolution , in_channels=3 , out_channels=3 )
        optimizer = torch.optim.SGD(model.parameters() , lr=0.0_001 )
        return model, optimizer

    @slow
    def test_training_step_equality(self ):
        device = '''cpu'''  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1_0_0_0 , beta_start=0.0_001 , beta_end=0.02 , beta_schedule='''linear''' , clip_sample=True , )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1_0_0_0 , beta_start=0.0_001 , beta_end=0.02 , beta_schedule='''linear''' , clip_sample=True , )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps

        # shared batches for DDPM and DDIM
        set_seed(0 )
        clean_images = [torch.randn((4, 3, 32, 32) ).clip(-1 , 1 ).to(device ) for _ in range(4 )]
        noise = [torch.randn((4, 3, 32, 32) ).to(device ) for _ in range(4 )]
        timesteps = [torch.randint(0 , 1_0_0_0 , (4,) ).long().to(device ) for _ in range(4 )]

        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32 )
        model.train().to(device )
        for i in range(4 ):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
            ddpm_noise_pred = model(ddpm_noisy_images , timesteps[i] ).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred , noise[i] )
            loss.backward()
            optimizer.step()
        del model, optimizer

        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32 )
        model.train().to(device )
        for i in range(4 ):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
            ddim_noise_pred = model(ddim_noisy_images , timesteps[i] ).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred , noise[i] )
            loss.backward()
            optimizer.step()
        del model, optimizer

        self.assertTrue(torch.allclose(ddpm_noisy_images , ddim_noisy_images , atol=1E-5 ) )
        self.assertTrue(torch.allclose(ddpm_noise_pred , ddim_noise_pred , atol=1E-5 ) )
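# Design note: DDPM and DDIM share the same forward (noising) process, so with identical seeds
# the noised batches and model predictions from the final step must agree to within
# floating-point tolerance, which is exactly what the two allclose checks assert.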
"""simple docstring"""
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class ImageEncoder(nn.Module ):
    def __init__(self , args ):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True )
        modules = list(model.children() )[:-2]
        self.model = nn.Sequential(*modules )
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds] )

    def forward(self , x ):
        out = self.pool(self.model(x ) )
        out = torch.flatten(out , start_dim=2 )
        out = out.transpose(1 , 2 ).contiguous()
        return out  # BxNx2048
class JsonlDataset(Dataset ):
    def __init__(self , data_path , tokenizer , transforms , labels , max_seq_length ):
        self.data = [json.loads(l ) for l in open(data_path )]
        self.data_dir = os.path.dirname(data_path )
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels )
        self.max_seq_length = max_seq_length
        self.transforms = transforms

    def __len__(self ):
        return len(self.data )

    def __getitem__(self , index ):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]['''text'''] , add_special_tokens=True ) )
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        label = torch.zeros(self.n_classes )
        label[[self.labels.index(tgt ) for tgt in self.data[index]['''label''']]] = 1

        image = Image.open(os.path.join(self.data_dir , self.data[index]['''img'''] ) ).convert('''RGB''' )
        image = self.transforms(image )

        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self ):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row['''label'''] )
        return label_freqs
def collate_fn(batch ):
    lens = [len(row['''sentence'''] ) for row in batch]
    bsz, max_seq_len = len(lens ), max(lens )
    mask_tensor = torch.zeros(bsz , max_seq_len , dtype=torch.long )
    text_tensor = torch.zeros(bsz , max_seq_len , dtype=torch.long )
    for i_batch, (input_row, length) in enumerate(zip(batch , lens ) ):
        text_tensor[i_batch, :length] = input_row['''sentence''']
        mask_tensor[i_batch, :length] = 1
    img_tensor = torch.stack([row['''image'''] for row in batch] )
    tgt_tensor = torch.stack([row['''label'''] for row in batch] )
    img_start_token = torch.stack([row['''image_start_token'''] for row in batch] )
    img_end_token = torch.stack([row['''image_end_token'''] for row in batch] )
    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
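# Sketch: wiring the pieces together (path and hyper-parameters are placeholders):
#
#   labels = get_mmimdb_labels()
#   dataset = JsonlDataset("data/train.jsonl", tokenizer, get_image_transforms(), labels, max_seq_length=512)
#   loader = torch.utils.data.DataLoader(dataset, batch_size=8, collate_fn=collate_fn)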
def get_mmimdb_labels():
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms():
return transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46777044, 0.44531429, 0.40661017] , std=[0.12221994, 0.12145835, 0.14380469] , ),
] )
"""simple docstring"""
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)
class IFSafetyChecker(PreTrainedModel ):
    config_class = CLIPConfig
    _no_split_modules = ['''CLIPEncoderLayer''']

    def __init__(self , config: CLIPConfig ):
        super().__init__(config )
        self.vision_model = CLIPVisionModelWithProjection(config.vision_config )
        self.p_head = nn.Linear(config.vision_config.projection_dim , 1 )
        self.w_head = nn.Linear(config.vision_config.projection_dim , 1 )

    @torch.no_grad()
    def forward(self , clip_input , images , p_threshold=0.5 , w_threshold=0.5 ):
        image_embeds = self.vision_model(clip_input )[0]

        nsfw_detected = self.p_head(image_embeds )
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()

        if any(nsfw_detected ):
            logger.warning(
                '''Potential NSFW content was detected in one or more images. A black image will be returned instead.'''
                ''' Try again with a different prompt and/or seed.''' )
            for idx, nsfw_detected_ in enumerate(nsfw_detected ):
                if nsfw_detected_:
                    images[idx] = np.zeros(images[idx].shape )

        watermark_detected = self.w_head(image_embeds )
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()

        if any(watermark_detected ):
            logger.warning(
                '''Potential watermarked content was detected in one or more images. A black image will be returned instead.'''
                ''' Try again with a different prompt and/or seed.''' )
            for idx, watermark_detected_ in enumerate(watermark_detected ):
                if watermark_detected_:
                    images[idx] = np.zeros(images[idx].shape )

        return images, nsfw_detected, watermark_detected
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {'''add_prefix_space''': True} if isinstance(tokenizer , BartTokenizer ) and not line.startswith(''' ''' ) else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line] , max_length=max_length , padding='''max_length''' if pad_to_max_length else None , truncation=True , return_tensors=return_tensors , add_special_tokens=True , **extra_kw , )


def trim_batch(input_ids, pad_token_id, attention_mask=None, ):
    keep_column_mask = input_ids.ne(pad_token_id ).any(dim=0 )
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
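# Example: for input_ids [[5, 7, 0, 0], [5, 0, 0, 0]] with pad_token_id=0, trim_batch keeps only
# the first two columns, since the last two contain padding in every row.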
class Seq2SeqDataset(Dataset ):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir ).joinpath(type_path + '''.source''' )
        self.tgt_file = Path(data_dir ).joinpath(type_path + '''.target''' )
        self.src_lens = self.get_char_lens(self.src_file )
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens ) > 0, F'''found empty line in {self.src_file}'''
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self ):
        return len(self.src_lens )

    def __getitem__(self , index ):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file ) , index ).rstrip('''\n''' )
        tgt_line = linecache.getline(str(self.tgt_file ) , index ).rstrip('''\n''' )
        assert source_line, F'''empty source line for index {index}'''
        assert tgt_line, F'''empty tgt line for index {index}'''

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer , T5Tokenizer ):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer , RagTokenizer ) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer , RagTokenizer ) else self.tokenizer

        source_inputs = encode_line(source_tokenizer , source_line , self.max_source_length , '''right''' )
        target_inputs = encode_line(target_tokenizer , tgt_line , self.max_target_length , '''right''' )

        source_ids = source_inputs['''input_ids'''].squeeze()
        target_ids = target_inputs['''input_ids'''].squeeze()
        src_mask = source_inputs['''attention_mask'''].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file ):
        return [len(x ) for x in Path(data_file ).open().readlines()]

    def collate_fn(self , batch ):
        input_ids = torch.stack([x['''input_ids'''] for x in batch] )
        masks = torch.stack([x['''attention_mask'''] for x in batch] )
        target_ids = torch.stack([x['''decoder_input_ids'''] for x in batch] )
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer , RagTokenizer )
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer , RagTokenizer )
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids , tgt_pad_token_id )
        source_ids, source_mask = trim_batch(input_ids , src_pad_token_id , attention_mask=masks )
        batch = {
            '''input_ids''': source_ids,
            '''attention_mask''': source_mask,
            '''decoder_input_ids''': y,
        }
        return batch
logger = getLogger(__name__)
def flatten_list(summary_ids ):
    return list(itertools.chain.from_iterable(summary_ids ) )


def save_git_info(folder_path ):
    repo_infos = get_git_info()
    save_json(repo_infos , os.path.join(folder_path , '''git_log.json''' ) )


def save_json(content, path, indent=4, **json_dump_kwargs ):
    with open(path , '''w''' ) as f:
        json.dump(content , f , indent=indent , **json_dump_kwargs )


def load_json(path ):
    with open(path ) as f:
        return json.load(f )


def get_git_info():
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        '''repo_id''': str(repo ),
        '''repo_sha''': str(repo.head.object.hexsha ),
        '''repo_branch''': str(repo.active_branch ),
        '''hostname''': str(socket.gethostname() ),
    }
    return repo_infos


def lmap(f, x ):
    return list(map(f , x ) )


def pickle_save(obj, path ):
    with open(path , '''wb''' ) as f:
        return pickle.dump(obj , f )
def normalize_answer(s ):
    def remove_articles(text ):
        return re.sub(r'''\b(a|an|the)\b''' , ''' ''' , text )

    def white_space_fix(text ):
        return " ".join(text.split() )

    def remove_punc(text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )

    def lower(text ):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s ) ) ) )


def f1_score(prediction, ground_truth ):
    prediction_tokens = normalize_answer(prediction ).split()
    ground_truth_tokens = normalize_answer(ground_truth ).split()
    common = Counter(prediction_tokens ) & Counter(ground_truth_tokens )
    num_same = sum(common.values() )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens )
    recall = 1.0 * num_same / len(ground_truth_tokens )
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
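# Worked example: f1_score("cat sat down", "the cat sat") normalizes to ["cat", "sat", "down"]
# vs ["cat", "sat"], so num_same=2, precision=2/3, recall=1, and F1 = 2*(2/3)/(5/3) = 0.8.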
def exact_match_score(prediction, ground_truth ):
    return normalize_answer(prediction ) == normalize_answer(ground_truth )


def calculate_exact_match(output_lns, reference_lns ):
    assert len(output_lns ) == len(reference_lns )
    em = 0
    for hypo, pred in zip(output_lns , reference_lns ):
        em += exact_match_score(hypo , pred )
    if len(output_lns ) > 0:
        em /= len(output_lns )
    return {"em": em}
def is_rag_model(model_prefix ):
    return model_prefix.startswith('''rag''' )


def set_extra_model_params(extra_params, hparams, config ):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param['''dropout'''] = '''dropout_rate'''
    for p in extra_params:
        if getattr(hparams , p , None ):
            if not hasattr(config , p ) and not hasattr(config , equivalent_param[p] ):
                logger.info('''config doesn\'t have a `{}` attribute'''.format(p ) )
                delattr(hparams , p )
                continue
            set_p = p if hasattr(config , p ) else equivalent_param[p]
            setattr(config , set_p , getattr(hparams , p ) )
            delattr(hparams , p )
    return hparams, config
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/vocab.txt''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/vocab.txt''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'''
),
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'''
),
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt''',
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'''
),
'''bert-base-multilingual-cased''': (
'''https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-cased''': (
'''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''bert-base-uncased''': 512,
'''bert-large-uncased''': 512,
'''bert-base-cased''': 512,
'''bert-large-cased''': 512,
'''bert-base-multilingual-uncased''': 512,
'''bert-base-multilingual-cased''': 512,
'''bert-base-chinese''': 512,
'''bert-base-german-cased''': 512,
'''bert-large-uncased-whole-word-masking''': 512,
'''bert-large-cased-whole-word-masking''': 512,
'''bert-large-uncased-whole-word-masking-finetuned-squad''': 512,
'''bert-large-cased-whole-word-masking-finetuned-squad''': 512,
'''bert-base-cased-finetuned-mrpc''': 512,
'''bert-base-german-dbmdz-cased''': 512,
'''bert-base-german-dbmdz-uncased''': 512,
'''TurkuNLP/bert-base-finnish-cased-v1''': 512,
'''TurkuNLP/bert-base-finnish-uncased-v1''': 512,
'''wietsedv/bert-base-dutch-cased''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''bert-base-uncased''': {'''do_lower_case''': True},
'''bert-large-uncased''': {'''do_lower_case''': True},
'''bert-base-cased''': {'''do_lower_case''': False},
'''bert-large-cased''': {'''do_lower_case''': False},
'''bert-base-multilingual-uncased''': {'''do_lower_case''': True},
'''bert-base-multilingual-cased''': {'''do_lower_case''': False},
'''bert-base-chinese''': {'''do_lower_case''': False},
'''bert-base-german-cased''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': False},
'''bert-base-cased-finetuned-mrpc''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-cased''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-uncased''': {'''do_lower_case''': True},
'''TurkuNLP/bert-base-finnish-cased-v1''': {'''do_lower_case''': False},
'''TurkuNLP/bert-base-finnish-uncased-v1''': {'''do_lower_case''': True},
'''wietsedv/bert-base-dutch-cased''': {'''do_lower_case''': False},
}
class BertTokenizerFast(PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('''lowercase''' , do_lower_case ) != do_lower_case
            or normalizer_state.get('''strip_accents''' , strip_accents ) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('''type''' ) )
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self , token_ids_0 , token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]

    def save_vocabulary(self , save_directory: str , filename_prefix: Optional[str] = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
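# Usage sketch (standard checkpoint name; round-trips a sentence pair through the fast tokenizer):
#
#   tok = BertTokenizerFast.from_pretrained("bert-base-uncased")
#   enc = tok("How are you?", "I am fine.")
#   # enc["token_type_ids"] marks the first segment with 0s and the second with 1s,
#   # matching create_token_type_ids_from_sequences above.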
"""simple docstring"""
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False ):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value )
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f'''If set, {key} must be yes or no.''' )
    return _value
_run_slow_tests = parse_flag_from_env('''RUN_SLOW''', default=False)
_run_remote_tests = parse_flag_from_env('''RUN_REMOTE''', default=False)
_run_local_tests = parse_flag_from_env('''RUN_LOCAL''', default=True)
_run_packaged_tests = parse_flag_from_env('''RUN_PACKAGED''', default=True)
# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''')
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''')
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''')
# Audio
require_sndfile = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''),
reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''',
)
# Beam
require_beam = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''),
reason='''test requires apache-beam and a compatible dill version''',
)
# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('''0.3.2'''),
reason='''test requires dill>0.3.2 for cloudpickle compatibility''',
)
# Windows
require_not_windows = pytest.mark.skipif(
sys.platform == '''win32''',
reason='''test should not be run on Windows''',
)
def require_faiss(test_case):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case


def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case


def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case
def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case
def for_all_test_methods(*decorators):
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate
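# A minimal usage sketch (added for illustration; the class name is hypothetical):
# `for_all_test_methods` applies each decorator to every `test_*` method, so the two
# guards below skip the whole class in one place instead of per-method.
#
# @for_all_test_methods(require_torch, slow)
# class MyHeavyTests(unittest.TestCase):
#     def test_big_model(self):
#         ...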
class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
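# A minimal usage sketch (added for illustration): simulate a dropped connection
# around a block that would otherwise reach the network.
#
# with offline(OfflineSimulationMode.CONNECTION_FAILS):
#     ...  # any requests.Session call in here raises requests.ConnectionError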
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)
@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
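# A minimal usage sketch (added for illustration): wrap a block that materializes an
# Arrow table to assert that it really allocates Arrow memory.
#
# with assert_arrow_memory_increases():
#     table = pa.table({"col": list(range(1_000_000))})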
def is_rng_equal(rng1, rng2):
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()


def xfail_if_500_502_http_error(func):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False):
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True):
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result
def pytest_xdist_worker_id():
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
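# A minimal usage sketch (added for illustration; the launcher command and script name
# are assumptions): run a distributed test script on a port that is unique per
# pytest-xdist worker so parallel workers don't collide.
#
# cmd = ["python", "-m", "torch.distributed.run", f"--master_port={get_torch_dist_unique_port()}", "test_script.py"]
# result = execute_subprocess_async(cmd, env=os.environ.copy())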
| 359
|
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
| 314
| 0
|
"""simple docstring"""
import unittest
from transformers import DebertaV2Config, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import (
        DebertaV2ForMaskedLM,
        DebertaV2ForMultipleChoice,
        DebertaV2ForQuestionAnswering,
        DebertaV2ForSequenceClassification,
        DebertaV2ForTokenClassification,
        DebertaV2Model,
    )
    from transformers.models.deberta_v2.modeling_deberta_v2 import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaV2ModelTester(object):
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32,
        num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02,
        relative_attention=False, position_biased_input=True, pos_att_type="None",
        num_labels=3, num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaV2Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            pos_att_type=self.pos_att_type,
        )

    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])
    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2Model(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2ForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaV2ForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaV2ForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2ForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_deberta_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2ForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaV2Model,
            DebertaV2ForMaskedLM,
            DebertaV2ForSequenceClassification,
            DebertaV2ForTokenClassification,
            DebertaV2ForQuestionAnswering,
            DebertaV2ForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaV2Model,
            "fill-mask": DebertaV2ForMaskedLM,
            "question-answering": DebertaV2ForQuestionAnswering,
            "text-classification": DebertaV2ForSequenceClassification,
            "token-classification": DebertaV2ForTokenClassification,
            "zero-shot": DebertaV2ForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaV2ModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaV2Model.from_pretrained("microsoft/deberta-v2-xlarge")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
| 360
|
"""simple docstring"""
def solution(limit: int = 50_000_000) -> int:
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))

    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    for prime1 in primes:
        square = prime1 * prime1
        for prime2 in primes:
            cube = prime2 * prime2 * prime2
            # the smallest fourth power of a prime is 2**4 == 16
            if square + cube >= limit - 16:
                break
            for prime3 in primes:
                tetr = prime3 * prime3 * prime3 * prime3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)

    return len(ret)
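# Worked check (Project Euler problem 87): below fifty, exactly four numbers can be
# written as a prime square plus a prime cube plus a prime fourth power, so
# solution(50) == 4:
#   28 = 2**2 + 2**3 + 2**4
#   33 = 3**2 + 2**3 + 2**4
#   47 = 2**2 + 3**3 + 2**4
#   49 = 5**2 + 2**3 + 2**4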
if __name__ == "__main__":
print(f"""{solution() = }""")
| 314
| 0
|
"""simple docstring"""
import math
import random
def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return the sigmoid of a float, or its derivative when deriv=True."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))
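# Worked example (added for illustration): sigmoid_function(0.0) == 0.5. With
# deriv=True the function expects an already-activated output a and returns
# a * (1 - a), e.g. sigmoid_function(0.5, True) == 0.25.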
# Initial Value
INITIAL_VALUE = 0.02
def forward_propagation(expected: int, number_propagations: int) -> float:
    # Random starting weight in [-99, 199]
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100
if __name__ == "__main__":
import doctest
doctest.testmod()
    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
print(forward_propagation(expected, number_propagations))
| 361
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"],
    "configuration_maskformer_swin": ["MaskFormerSwinConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_maskformer"] = ["MaskFormerFeatureExtractor"]
    _import_structure["image_processing_maskformer"] = ["MaskFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_maskformer"] = [
        "MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MaskFormerForInstanceSegmentation",
        "MaskFormerModel",
        "MaskFormerPreTrainedModel",
    ]
    _import_structure["modeling_maskformer_swin"] = [
        "MaskFormerSwinBackbone",
        "MaskFormerSwinModel",
        "MaskFormerSwinPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 314
| 0
|
import os
from pathlib import Path
def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
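# A minimal usage sketch (added for illustration): the JIT build requires a CUDA
# toolchain and the bundled kernel sources at the path above; the first call compiles
# the extension, later calls reuse the cached build.
#
# MSDA = load_cuda_kernels()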
| 362
|
"""simple docstring"""
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    def __init__(self, short_edge_length, max_size=sys.maxsize):
        """
        Args:
            short_edge_length (list[min, max])
            max_size (int): maximum allowed longest edge length.
        """
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length

    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)

            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # 3, 0, 1)  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False
                ).squeeze(0)
            img_augs.append(img)

        return img_augs
class Preprocess:
    def __init__(self, cfg):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std

    def pad(self, images):
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im,
                [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]],
                value=self.pad_value,
            )
            for size, im in zip(image_sizes, images)
        ]

        return torch.stack(images), torch.tensor(image_sizes)

    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _scale_box(boxes, scale_yx):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def _clip_box(tensor, box_size: Tuple[int, int]):
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
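# A minimal usage sketch (added for illustration; assumes `cfg` exposes the
# detectron2-style keys read in Preprocess.__init__):
#
# preprocess = Preprocess(cfg)
# images, sizes, scales_yx = preprocess(["photo.jpg"])  # file paths go through img_tensorize
# # predicted boxes can then be mapped back to the original resolution with
# # _scale_box(boxes, scales_yx) and clipped to an image with _clip_box(boxes, (h, w)).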
| 314
| 0
|
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=1,
        padding_value=0.0, sampling_rate=16000, return_attention_mask=True, do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
| 363
|
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp):
    """Checks whether cp is the codepoint of a CJK character."""
    # This defines a "chinese character" as anything in the CJK Unicode block:
    #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    #
    # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
    # despite its name. The modern Korean Hangul alphabet is a different block,
    # as is Japanese Hiragana and Katakana. Those alphabets are used to write
    # space-separated words, so they are not treated specially and handled
    # like the all of the other languages.
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)
        or (cp >= 0x20000 and cp <= 0x2A6DF)
        or (cp >= 0x2A700 and cp <= 0x2B73F)
        or (cp >= 0x2B740 and cp <= 0x2B81F)
        or (cp >= 0x2B820 and cp <= 0x2CEAF)
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)
    ):
        return True

    return False
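# Worked example (added for illustration):
#   _is_chinese_char(ord("中"))  -> True   (U+4E2D falls in the main CJK block)
#   _is_chinese_char(ord("a"))   -> False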
def is_chinese(word: str):
    # word like '180' or '身高' or '神'
    for char in word:
        char = ord(char)
        if not _is_chinese_char(char):
            return 0
    return 1


def get_chinese_word(tokens: List[str]):
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match_len = min(end - start, max_word_len)
            for i in range(max_match_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
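# Worked example (added for illustration): with the whole word "中国" known from LTP
# segmentation, add_sub_symbol(["中", "国", "人"], {"中国"}) returns
# ["中", "##国", "人"] — "国" is marked as the continuation of a whole word, which is
# exactly the positional information whole-word masking needs.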
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []

    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)

    return ref_ids
def main(args):
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
'''--file_name''',
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''', type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path'''
)
parser.add_argument('''--bert''', type=str, default='''./resources/robert''', help='''resources for Bert tokenizer''')
parser.add_argument('''--save_path''', type=str, default='''./resources/ref.txt''', help='''path to save res''')
    args = parser.parse_args()
main(args)
| 314
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 364
|
"""simple docstring"""
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
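# Worked example (added for illustration):
#   maxpooling([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], size=2, stride=2)
#   -> [[ 6.,  8.],
#       [14., 16.]]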
def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
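# Worked example (added for illustration; note that int(np.average(...)) truncates):
#   avgpooling([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], size=2, stride=2)
#   -> [[ 3.,  5.],
#       [11., 13.]]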
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='''avgpooling''', verbose=True)
# Loading the image
    image = Image.open("path_to_image")
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 314
| 0
|
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def _snake_case ( _snake_case : str , _snake_case : str , _snake_case : str , _snake_case : PreTrainedTokenizer , _snake_case : int , _snake_case : Optional[int] = None , ):
lowerCAmelCase : Any = {}
if train_file is not None:
lowerCAmelCase : str = [train_file]
if eval_file is not None:
lowerCAmelCase : Any = [eval_file]
if test_file is not None:
lowerCAmelCase : Optional[int] = [test_file]
lowerCAmelCase : Tuple = datasets.load_dataset('''csv''' , data_files=_a )
lowerCAmelCase : Any = list(ds[list(files.keys() )[0]].features.keys() )
lowerCAmelCase : Union[str, Any] = features_name.pop(_a )
lowerCAmelCase : Optional[int] = list(set(ds[list(files.keys() )[0]][label_name] ) )
lowerCAmelCase : List[Any] = {label: i for i, label in enumerate(_a )}
lowerCAmelCase : Any = tokenizer.model_input_names
lowerCAmelCase : Union[str, Any] = {}
if len(_a ) == 1:
for k in files.keys():
lowerCAmelCase : Optional[Any] = ds[k].map(
lambda _snake_case : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=_a , max_length=_a , padding='''max_length''' ) , batched=_a , )
elif len(_a ) == 2:
for k in files.keys():
lowerCAmelCase : int = ds[k].map(
lambda _snake_case : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=_a , max_length=_a , padding='''max_length''' , ) , batched=_a , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
lowerCAmelCase : Tuple = {k: v for k, v in ex.items() if k in input_names}
lowerCAmelCase : Optional[int] = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
lowerCAmelCase : str = {k: v for k, v in ex.items() if k in input_names}
lowerCAmelCase : List[Any] = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
lowerCAmelCase : Tuple = {k: v for k, v in ex.items() if k in input_names}
lowerCAmelCase : List[Any] = labelaid[ex[label_name]]
yield (d, label)
lowerCAmelCase : int = (
tf.data.Dataset.from_generator(
_a , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
lowerCAmelCase : Any = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
lowerCAmelCase : str = (
tf.data.Dataset.from_generator(
_a , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
lowerCAmelCase : List[Any] = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
lowerCAmelCase : str = (
tf.data.Dataset.from_generator(
_a , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
lowerCAmelCase : Dict = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
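# A hedged sketch of the generator -> tf.data.Dataset pattern used above,
# assuming records shaped like {"input_ids": [...], "label": 0}; the helper
# name and the "label" key are illustrative assumptions only.
def _make_dataset_sketch(examples, input_names):
    def gen():
        for ex in examples:
            # keep only the tokenizer outputs the model expects
            features = {k: v for k, v in ex.items() if k in input_names}
            yield features, ex["label"]

    return tf.data.Dataset.from_generator(
        gen,
        ({k: tf.int64 for k in input_names}, tf.int64),
        ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
    )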
snake_case__ : int = logging.getLogger(__name__)
@dataclass
class snake_case_:
__UpperCamelCase = field(metadata={'''help''': '''Which column contains the label'''} )
__UpperCamelCase = field(default=__snake_case , metadata={'''help''': '''The path of the training file'''} )
__UpperCamelCase = field(default=__snake_case , metadata={'''help''': '''The path of the development file'''} )
__UpperCamelCase = field(default=__snake_case , metadata={'''help''': '''The path of the test file'''} )
__UpperCamelCase = field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
__UpperCamelCase = field(
default=__snake_case , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
@dataclass
class snake_case_:
__UpperCamelCase = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
__UpperCamelCase = field(
default=__snake_case , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
__UpperCamelCase = field(
default=__snake_case , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
__UpperCamelCase = field(default=__snake_case , metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
__UpperCamelCase = field(
default=__snake_case , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
def _snake_case ( ):
lowerCAmelCase : int = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
lowerCAmelCase : str = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.info(
f'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '''
f'''16-bits training: {training_args.fpaa}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
lowerCAmelCase : Dict = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=_a , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
lowerCAmelCase : int = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(_a ) , labelaid=_a , idalabel={id: label for label, id in labelaid.items()} , finetuning_task='''text-classification''' , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
lowerCAmelCase : str = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool('''.bin''' in model_args.model_name_or_path ) , config=_a , cache_dir=model_args.cache_dir , )
def compute_metrics(_snake_case : EvalPrediction ) -> Dict:
lowerCAmelCase : Optional[Any] = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
lowerCAmelCase : List[str] = TFTrainer(
model=_a , args=_a , train_dataset=_a , eval_dataset=_a , compute_metrics=_a , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
lowerCAmelCase : Tuple = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
lowerCAmelCase : Any = trainer.evaluate()
lowerCAmelCase : Any = os.path.join(training_args.output_dir , '''eval_results.txt''' )
with open(_a , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(f''' {key} = {value}''' )
writer.write(f'''{key} = {value}\n''' )
results.update(_a )
return results
if __name__ == "__main__":
main()
| 365
|
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class snake_case_( a__ ):
def __init__( self : Dict , UpperCamelCase_ : Any , UpperCamelCase_ : List[str] ):
super().__init__()
# make sure scheduler can always be converted to DDIM
lowerCAmelCase : str = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=UpperCamelCase_ , scheduler=UpperCamelCase_ )
@torch.no_grad()
def __call__( self : str , UpperCamelCase_ : int = 1 , UpperCamelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_ : float = 0.0 , UpperCamelCase_ : int = 5_0 , UpperCamelCase_ : Optional[bool] = None , UpperCamelCase_ : Optional[str] = "pil" , UpperCamelCase_ : bool = True , ):
# Sample gaussian noise to begin loop
if isinstance(self.unet.config.sample_size , UpperCamelCase_ ):
lowerCAmelCase : Dict = (
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size,
self.unet.config.sample_size,
)
else:
lowerCAmelCase : str = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and len(UpperCamelCase_ ) != batch_size:
raise ValueError(
F'''You have passed a list of generators of length {len(UpperCamelCase_ )}, but requested an effective batch'''
F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
lowerCAmelCase : int = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(UpperCamelCase_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
lowerCAmelCase : Optional[Any] = self.unet(UpperCamelCase_ , UpperCamelCase_ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
lowerCAmelCase : Dict = self.scheduler.step(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , eta=UpperCamelCase_ , use_clipped_model_output=UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample
lowerCAmelCase : Tuple = (image / 2 + 0.5).clamp(0 , 1 )
lowerCAmelCase : str = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowerCAmelCase : Any = self.numpy_to_pil(UpperCamelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCamelCase_ )
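# A minimal usage sketch for the unconditional DDIM pipeline above; the
# checkpoint name "google/ddpm-cifar10-32" is an assumption for illustration:
# from diffusers import DDIMPipeline
# pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
# images = pipe(batch_size=4, num_inference_steps=50, eta=0.0).images
# images[0].save("ddim_sample.png")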
| 314
| 0
|
"""simple docstring"""
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class snake_case_:
def __init__( self : List[str] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Dict=1_3 , UpperCamelCase_ : Dict=7 , UpperCamelCase_ : int=True , UpperCamelCase_ : Optional[Any]=True , UpperCamelCase_ : str=True , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : int=9_9 , UpperCamelCase_ : Tuple=3_2 , UpperCamelCase_ : Optional[Any]=5 , UpperCamelCase_ : Dict=4 , UpperCamelCase_ : Tuple=4 , UpperCamelCase_ : Optional[int]="gelu" , UpperCamelCase_ : Union[str, Any]=0.0 , UpperCamelCase_ : Any=0.1 , UpperCamelCase_ : int=True , UpperCamelCase_ : List[Any]=5_1_2 , UpperCamelCase_ : Optional[int]=1_6 , UpperCamelCase_ : Dict=2 , UpperCamelCase_ : Tuple=0.02 , UpperCamelCase_ : Any=3 , UpperCamelCase_ : List[Any]=4 , UpperCamelCase_ : Optional[Any]=None , ):
lowerCAmelCase : Optional[int] = parent
lowerCAmelCase : str = batch_size
lowerCAmelCase : Optional[int] = seq_length
lowerCAmelCase : List[Any] = is_training
lowerCAmelCase : List[str] = use_input_mask
lowerCAmelCase : Optional[Any] = use_token_type_ids
lowerCAmelCase : Optional[Any] = use_labels
lowerCAmelCase : Optional[int] = vocab_size
lowerCAmelCase : Optional[int] = hidden_size
lowerCAmelCase : List[str] = num_hidden_layers
lowerCAmelCase : Dict = num_attention_heads
lowerCAmelCase : Any = intermediate_multiple_size
lowerCAmelCase : int = hidden_act
lowerCAmelCase : Any = hidden_dropout
lowerCAmelCase : Any = attention_dropout
lowerCAmelCase : List[str] = weight_tying
lowerCAmelCase : Tuple = max_position_embeddings
lowerCAmelCase : Optional[Any] = type_vocab_size
lowerCAmelCase : Dict = type_sequence_label_size
lowerCAmelCase : List[Any] = initializer_range
lowerCAmelCase : Any = num_labels
lowerCAmelCase : Union[str, Any] = num_choices
lowerCAmelCase : int = scope
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase : List[Any] = None
if self.use_input_mask:
lowerCAmelCase : int = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase : str = None
if self.use_labels:
lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase : Any = self.get_config()
return config, input_ids, input_mask, token_labels
def lowerCamelCase__ ( self : Any ):
return GPTNeoXJapaneseConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowercase , initializer_range=self.initializer_range , )
def lowerCamelCase__ ( self : int ):
lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : Dict = self.prepare_config_and_inputs()
lowerCAmelCase : int = True
return config, input_ids, input_mask, token_labels
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : List[Any] , UpperCamelCase_ : str , UpperCamelCase_ : Any ):
lowerCAmelCase : Union[str, Any] = GPTNeoXJapaneseModel(config=_lowercase )
model.to(_lowercase )
model.eval()
lowerCAmelCase : int = model(_lowercase , attention_mask=_lowercase )
lowerCAmelCase : Any = model(_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[Any] ):
lowerCAmelCase : List[str] = True
lowerCAmelCase : Optional[Any] = GPTNeoXJapaneseModel(_lowercase )
model.to(_lowercase )
model.eval()
lowerCAmelCase : Any = model(_lowercase , attention_mask=_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Any , UpperCamelCase_ : int ):
lowerCAmelCase : Optional[Any] = GPTNeoXJapaneseForCausalLM(config=_lowercase )
model.to(_lowercase )
model.eval()
lowerCAmelCase : Tuple = model(_lowercase , attention_mask=_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : List[Any] , UpperCamelCase_ : str , UpperCamelCase_ : int ):
lowerCAmelCase : Dict = True
lowerCAmelCase : int = GPTNeoXJapaneseForCausalLM(config=_lowercase )
model.to(_lowercase )
model.eval()
# first forward pass
lowerCAmelCase : Tuple = model(_lowercase , attention_mask=_lowercase , use_cache=_lowercase )
lowerCAmelCase : List[str] = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
lowerCAmelCase : Dict = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase : Dict = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and attention mask
lowerCAmelCase : Any = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase : Dict = torch.cat([input_mask, next_mask] , dim=-1 )
lowerCAmelCase : int = model(_lowercase , attention_mask=_lowercase , output_hidden_states=_lowercase )
lowerCAmelCase : Optional[int] = output_from_no_past['''hidden_states'''][0]
lowerCAmelCase : Optional[int] = model(
_lowercase , attention_mask=_lowercase , past_key_values=_lowercase , output_hidden_states=_lowercase , )['''hidden_states'''][0]
# select random slice
lowerCAmelCase : Any = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase : Dict = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCAmelCase : str = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_lowercase , _lowercase , atol=1E-3 ) )
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase : str = self.prepare_config_and_inputs()
lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : Union[str, Any] = config_and_inputs
lowerCAmelCase : Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class snake_case_( a__ , a__ , unittest.TestCase ):
__UpperCamelCase = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
__UpperCamelCase = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
__UpperCamelCase = (
{"""feature-extraction""": GPTNeoXJapaneseModel, """text-generation""": GPTNeoXJapaneseForCausalLM}
if is_torch_available()
else {}
)
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : Any = GPTNeoXJapaneseModelTester(self )
lowerCAmelCase : Tuple = ConfigTester(self , config_class=_lowercase , hidden_size=3_7 )
def lowerCamelCase__ ( self : Dict ):
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(_lowercase , _lowercase , _lowercase )
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(_lowercase , _lowercase , _lowercase )
def lowerCamelCase__ ( self : List[Any] ):
# This regression test was failing with PyTorch < 1.3
lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
lowerCAmelCase : Any = None
self.model_tester.create_and_check_model_as_decoder(_lowercase , _lowercase , _lowercase )
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(_lowercase , _lowercase , _lowercase )
def lowerCamelCase__ ( self : int ):
lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*_lowercase )
@slow
def lowerCamelCase__ ( self : Any ):
lowerCAmelCase : List[str] = '''abeja/gpt-neox-japanese-2.7b'''
lowerCAmelCase : List[Any] = ['''データサイエンティストとは、''', '''100年後に必要とされる会社は、''', '''フルリモートの環境で働くために必要なことは、''', '''国境の長いトンネルを抜けると''', '''美味しい日本食といえば、''']
lowerCAmelCase : Dict = [
'''データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。''',
'''100年後に必要とされる会社は、「人」が中心の会社です。''',
'''フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。''',
'''国境の長いトンネルを抜けると、そこは雪国だった。''',
'''美味しい日本食といえば、やっぱりお寿司ですよね。''',
]
lowerCAmelCase : Dict = GPTNeoXJapaneseTokenizer.from_pretrained(_lowercase )
lowerCAmelCase : str = GPTNeoXJapaneseForCausalLM.from_pretrained(_lowercase )
lowerCAmelCase : Tuple = []
for prompt in prompts:
lowerCAmelCase : Any = tokenizer(_lowercase , return_tensors='''pt''' ).input_ids
lowerCAmelCase : int = model.generate(_lowercase , max_length=5_0 )
lowerCAmelCase : int = tokenizer.batch_decode(_lowercase , skip_special_tokens=_lowercase )
predicted_outputs += generated_string
self.assertListEqual(_lowercase , _lowercase )
| 366
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
snake_case__ : int = {'''configuration_plbart''': ['''PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PLBartConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : int = ['''PLBartTokenizer''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : int = [
'''PLBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PLBartForCausalLM''',
'''PLBartForConditionalGeneration''',
'''PLBartForSequenceClassification''',
'''PLBartModel''',
'''PLBartPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
snake_case__ : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
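# A short sketch of what the _LazyModule registration above buys us: importing
# the package is cheap, and heavy backends load only on attribute access.
# (The names below illustrate the intended usage; they are not part of this file.)
#
#   import transformers.models.plbart as plbart  # no torch import happens yet
#   model_cls = plbart.PLBartModel               # torch is imported lazily here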
| 314
| 0
|
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class snake_case_( lowerCamelCase_ ):
def __init__( self : Union[str, Any] , UpperCamelCase_ : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[Any] ):
lowerCAmelCase : Union[str, Any] = dataset
lowerCAmelCase : List[Any] = process
lowerCAmelCase : Dict = params
def __len__( self : List[Any] ):
return len(self.dataset )
def __getitem__( self : Dict , UpperCamelCase_ : Union[str, Any] ):
lowerCAmelCase : Tuple = self.dataset[i]
lowerCAmelCase : Dict = self.process(_UpperCAmelCase , **self.params )
return processed
class snake_case_( lowerCamelCase_ ):
def __init__( self : Dict , UpperCamelCase_ : List[str] , UpperCamelCase_ : int , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[Any]=None ):
lowerCAmelCase : Optional[Any] = loader
lowerCAmelCase : List[str] = infer
lowerCAmelCase : Optional[Any] = params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
lowerCAmelCase : Union[str, Any] = None
lowerCAmelCase : Union[str, Any] = loader_batch_size
# Internal bookkeeping
lowerCAmelCase : Tuple = None
lowerCAmelCase : Optional[Any] = None
def __len__( self : Tuple ):
return len(self.loader )
def __iter__( self : Dict ):
lowerCAmelCase : int = iter(self.loader )
return self
def lowerCamelCase__ ( self : Any ):
if isinstance(self._loader_batch_data , torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
lowerCAmelCase : List[Any] = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
lowerCAmelCase : str = {}
for k, element in self._loader_batch_data.items():
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
# Convert ModelOutput to tuple first
lowerCAmelCase : str = element.to_tuple()
if isinstance(element[0] , torch.Tensor ):
lowerCAmelCase : List[Any] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
lowerCAmelCase : Any = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(_UpperCAmelCase , _UpperCAmelCase ):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0] , torch.Tensor ):
lowerCAmelCase : List[str] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
lowerCAmelCase : Any = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
lowerCAmelCase : List[str] = None
elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
lowerCAmelCase : List[str] = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] , np.ndarray ):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
lowerCAmelCase : Optional[int] = np.expand_dims(element[self._loader_batch_index] , 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
lowerCAmelCase : Optional[int] = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
lowerCAmelCase : List[Any] = self._loader_batch_data.__class__(_UpperCAmelCase )
self._loader_batch_index += 1
return result
def lowerCamelCase__ ( self : List[Any] ):
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
lowerCAmelCase : str = next(self.iterator )
lowerCAmelCase : int = self.infer(_UpperCAmelCase , **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(_UpperCAmelCase , torch.Tensor ):
lowerCAmelCase : str = processed
else:
lowerCAmelCase : Union[str, Any] = list(processed.keys() )[0]
lowerCAmelCase : Dict = processed[key]
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
lowerCAmelCase : Any = len(_UpperCAmelCase )
else:
lowerCAmelCase : int = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
lowerCAmelCase : str = observed_batch_size
# Setting internal index to unwrap the batch
lowerCAmelCase : Optional[int] = processed
lowerCAmelCase : Dict = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class snake_case_( lowerCamelCase_ ):
def __init__( self : List[Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : str , UpperCamelCase_ : List[str]=None ):
super().__init__(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def __iter__( self : int ):
lowerCAmelCase : Dict = iter(self.loader )
lowerCAmelCase : Any = None
return self
def lowerCamelCase__ ( self : Any ):
if self.subiterator is None:
lowerCAmelCase : Optional[int] = self.infer(next(self.iterator ) , **self.params )
try:
# Try to return next item
lowerCAmelCase : Tuple = next(self.subiterator )
except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of the iterator
            # have created their subiterator and have been iterated over.
            #
            # Another way to look at it is that we're basically flattening lists of
            # lists into a single list, but with generators
lowerCAmelCase : Union[str, Any] = self.infer(next(self.iterator ) , **self.params )
lowerCAmelCase : Optional[int] = next(self.subiterator )
return processed
class snake_case_( lowerCamelCase_ ):
def __iter__( self : List[str] ):
lowerCAmelCase : Optional[Any] = iter(self.loader )
return self
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : Union[str, Any] = False
lowerCAmelCase : Optional[int] = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
lowerCAmelCase : Optional[Any] = self.loader_batch_item()
lowerCAmelCase : List[Any] = item.pop('''is_last''' )
accumulator.append(_UpperCAmelCase )
if is_last:
return accumulator
while not is_last:
lowerCAmelCase : List[str] = self.infer(next(self.iterator ) , **self.params )
if self.loader_batch_size is not None:
if isinstance(_UpperCAmelCase , torch.Tensor ):
lowerCAmelCase : Optional[int] = processed
else:
lowerCAmelCase : Optional[int] = list(processed.keys() )[0]
lowerCAmelCase : Tuple = processed[key]
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
lowerCAmelCase : Optional[Any] = len(_UpperCAmelCase )
else:
lowerCAmelCase : str = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
lowerCAmelCase : List[Any] = observed_batch_size
lowerCAmelCase : Optional[Any] = processed
lowerCAmelCase : Optional[int] = 0
while self._loader_batch_index < self.loader_batch_size:
lowerCAmelCase : Optional[int] = self.loader_batch_item()
lowerCAmelCase : List[Any] = item.pop('''is_last''' )
accumulator.append(_UpperCAmelCase )
if is_last:
return accumulator
else:
lowerCAmelCase : List[str] = processed
lowerCAmelCase : Optional[Any] = item.pop('''is_last''' )
accumulator.append(_UpperCAmelCase )
return accumulator
class snake_case_( lowerCamelCase_ ):
def __init__( self : int , UpperCamelCase_ : Dataset , UpperCamelCase_ : str ):
lowerCAmelCase : str = dataset
lowerCAmelCase : List[str] = key
def __len__( self : int ):
return len(self.dataset )
def __getitem__( self : Optional[Any] , UpperCamelCase_ : Optional[int] ):
return self.dataset[i][self.key]
class snake_case_( lowerCamelCase_ ):
def __init__( self : Dict , UpperCamelCase_ : Dataset , UpperCamelCase_ : str , UpperCamelCase_ : str ):
lowerCAmelCase : Optional[int] = dataset
lowerCAmelCase : List[Any] = keya
lowerCAmelCase : Optional[Any] = keya
def __len__( self : Any ):
return len(self.dataset )
def __getitem__( self : List[Any] , UpperCamelCase_ : List[Any] ):
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
| 367
|
"""simple docstring"""
import os
import pytest
from transformers.dynamic_module_utils import get_imports
snake_case__ : Optional[Any] = '''
import os
'''
snake_case__ : Tuple = '''
def foo():
import os
return False
'''
snake_case__ : Any = '''
def foo():
def bar():
if True:
import os
return False
return bar()
'''
snake_case__ : Any = '''
import os
try:
import bar
except ImportError:
raise ValueError()
'''
snake_case__ : int = '''
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
'''
snake_case__ : Any = '''
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
'''
snake_case__ : List[str] = '''
import os
try:
import bar
except ImportError as e:
raise ValueError()
'''
snake_case__ : int = '''
import os
try:
import bar
except:
raise ValueError()
'''
snake_case__ : List[Any] = '''
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
'''
snake_case__ : Optional[int] = '''
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
'''
snake_case__ : Any = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize('''case''' , _snake_case )
def _snake_case ( _snake_case : Union[str, Any] , _snake_case : List[str] ):
lowerCAmelCase : Dict = os.path.join(_snake_case , '''test_file.py''' )
with open(_snake_case , '''w''' ) as _tmp_file:
_tmp_file.write(_snake_case )
lowerCAmelCase : Tuple = get_imports(_snake_case )
assert parsed_imports == ["os"]
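# A quick illustration of the behaviour being tested above (the path and
# snippet choice are hypothetical):
# with open("my_module.py", "w") as f:
#     f.write(TOP_LEVEL_TRY_IMPORT)
# get_imports("my_module.py")  # -> ["os"]; `bar` is skipped as a guarded import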
| 314
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
snake_case__ : str = logging.get_logger(__name__)
snake_case__ : int = '''▁'''
snake_case__ : str = {'''vocab_file''': '''sentencepiece.bpe.model''', '''monolingual_vocab_file''': '''dict.txt'''}
snake_case__ : List[str] = {
'''vocab_file''': {
'''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model''',
},
'''monolingual_vocab_file''': {
'''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt''',
},
}
snake_case__ : int = {'''vinai/bartpho-syllable''': 1_024}
class snake_case_( _a ):
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = ["""input_ids""", """attention_mask"""]
def __init__( self : Optional[int] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : List[str]="<s>" , UpperCamelCase_ : Any="</s>" , UpperCamelCase_ : Tuple="</s>" , UpperCamelCase_ : Union[str, Any]="<s>" , UpperCamelCase_ : Dict="<unk>" , UpperCamelCase_ : List[str]="<pad>" , UpperCamelCase_ : Optional[Any]="<mask>" , UpperCamelCase_ : Optional[Dict[str, Any]] = None , **UpperCamelCase_ : Union[str, Any] , ):
        # Mask token behaves like a normal word, i.e. include the space before it
lowerCAmelCase : Optional[int] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token
lowerCAmelCase : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCamelCase , )
lowerCAmelCase : int = vocab_file
lowerCAmelCase : Optional[int] = monolingual_vocab_file
lowerCAmelCase : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__lowerCamelCase ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
lowerCAmelCase : Optional[Any] = {}
lowerCAmelCase : Optional[Any] = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(__lowerCamelCase ) not in self.fairseq_tokens_to_ids:
lowerCAmelCase : Tuple = cnt
cnt += 1
with open(__lowerCamelCase , '''r''' , encoding='''utf-8''' ) as f:
for line in f.readlines():
lowerCAmelCase : int = line.strip().split()[0]
lowerCAmelCase : Any = len(self.fairseq_tokens_to_ids )
if str(__lowerCamelCase ) not in self.fairseq_tokens_to_ids:
lowerCAmelCase : str = len(self.fairseq_tokens_to_ids )
lowerCAmelCase : Optional[int] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : Optional[Any] ):
lowerCAmelCase : int = self.__dict__.copy()
lowerCAmelCase : Optional[int] = None
lowerCAmelCase : List[Any] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : str , UpperCamelCase_ : Union[str, Any] ):
lowerCAmelCase : List[str] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowerCAmelCase : Dict = {}
lowerCAmelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def lowerCamelCase__ ( self : int , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase : Union[str, Any] = [self.cls_token_id]
lowerCAmelCase : Dict = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None , UpperCamelCase_ : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1]
def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ):
lowerCAmelCase : Any = [self.sep_token_id]
lowerCAmelCase : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowerCamelCase__ ( self : Union[str, Any] ):
return len(self.fairseq_ids_to_tokens )
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase : Tuple = {self.convert_ids_to_tokens(__lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : str ):
return self.sp_model.encode(__lowerCamelCase , out_type=__lowerCamelCase )
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : Dict ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : int ):
return self.fairseq_ids_to_tokens[index]
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : List[str] ):
lowerCAmelCase : int = """""".join(__lowerCamelCase ).replace(__lowerCamelCase , ''' ''' ).strip()
return out_string
def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ):
if not os.path.isdir(__lowerCamelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCAmelCase : Tuple = os.path.join(
__lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCAmelCase : List[Any] = os.path.join(
__lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCamelCase , '''wb''' ) as fi:
lowerCAmelCase : List[str] = self.sp_model.serialized_model_proto()
fi.write(__lowerCamelCase )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
__lowerCamelCase ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , __lowerCamelCase )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(__lowerCamelCase , '''w''' , encoding='''utf-8''' ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(F'''{str(__lowerCamelCase )} \n''' )
return out_vocab_file, out_monolingual_vocab_file
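# A hedged usage sketch for the tokenizer above; the public class name
# (BartphoTokenizer) and the example sentence are assumptions:
# tokenizer = BartphoTokenizer.from_pretrained("vinai/bartpho-syllable")
# ids = tokenizer("Chúng tôi là những nghiên cứu viên.")["input_ids"]
# text = tokenizer.decode(ids, skip_special_tokens=True)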
| 368
|
"""simple docstring"""
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class snake_case_( tf.keras.optimizers.schedules.LearningRateSchedule ):
def __init__( self : Tuple , UpperCamelCase_ : float , UpperCamelCase_ : Callable , UpperCamelCase_ : int , UpperCamelCase_ : float = 1.0 , UpperCamelCase_ : str = None , ):
super().__init__()
lowerCAmelCase : Dict = initial_learning_rate
lowerCAmelCase : List[str] = warmup_steps
lowerCAmelCase : Union[str, Any] = power
lowerCAmelCase : Dict = decay_schedule_fn
lowerCAmelCase : str = name
def __call__( self : Dict , UpperCamelCase_ : Optional[Any] ):
with tf.name_scope(self.name or '''WarmUp''' ) as name:
# Implements polynomial warmup. i.e., if global_step < warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
lowerCAmelCase : Dict = tf.cast(UpperCamelCase_ , tf.floataa )
lowerCAmelCase : List[Any] = tf.cast(self.warmup_steps , tf.floataa )
lowerCAmelCase : str = global_step_float / warmup_steps_float
lowerCAmelCase : Any = self.initial_learning_rate * tf.math.pow(UpperCamelCase_ , self.power )
return tf.cond(
global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=UpperCamelCase_ , )
def lowerCamelCase__ ( self : str ):
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
def _snake_case ( _snake_case : float , _snake_case : int , _snake_case : int , _snake_case : float = 0.0 , _snake_case : float = 0.9 , _snake_case : float = 0.999 , _snake_case : float = 1E-8 , _snake_case : Optional[float] = None , _snake_case : Optional[float] = None , _snake_case : float = 0.0 , _snake_case : float = 1.0 , _snake_case : Optional[List[str]] = None , ):
lowerCAmelCase : Dict = tf.keras.optimizers.schedules.PolynomialDecay(
initial_learning_rate=_snake_case , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=_snake_case , )
if num_warmup_steps:
lowerCAmelCase : List[str] = WarmUp(
initial_learning_rate=_snake_case , decay_schedule_fn=_snake_case , warmup_steps=_snake_case , )
if weight_decay_rate > 0.0:
lowerCAmelCase : Dict = AdamWeightDecay(
learning_rate=_snake_case , weight_decay_rate=_snake_case , beta_a=_snake_case , beta_a=_snake_case , epsilon=_snake_case , clipnorm=_snake_case , global_clipnorm=_snake_case , exclude_from_weight_decay=['''LayerNorm''', '''layer_norm''', '''bias'''] , include_in_weight_decay=_snake_case , )
else:
lowerCAmelCase : Any = tf.keras.optimizers.Adam(
learning_rate=_snake_case , beta_a=_snake_case , beta_a=_snake_case , epsilon=_snake_case , clipnorm=_snake_case , global_clipnorm=_snake_case , )
# We return the optimizer and the LR scheduler in order to better track the
# evolution of the LR independently of the optimizer.
return optimizer, lr_schedule
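# A minimal usage sketch of the factory above, assuming its public name is
# create_optimizer and that the keyword names follow the upstream transformers
# signature (both are assumptions; the def is anonymized in this dump):
# optimizer, lr_schedule = create_optimizer(
#     init_lr=5e-5,
#     num_train_steps=10_000,
#     num_warmup_steps=1_000,
#     weight_decay_rate=0.01,
# )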
class snake_case_( a__ ):
def __init__( self : Optional[int] , UpperCamelCase_ : Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001 , UpperCamelCase_ : float = 0.9 , UpperCamelCase_ : float = 0.999 , UpperCamelCase_ : float = 1E-7 , UpperCamelCase_ : bool = False , UpperCamelCase_ : float = 0.0 , UpperCamelCase_ : Optional[List[str]] = None , UpperCamelCase_ : Optional[List[str]] = None , UpperCamelCase_ : str = "AdamWeightDecay" , **UpperCamelCase_ : List[Any] , ):
super().__init__(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ )
lowerCAmelCase : Tuple = weight_decay_rate
lowerCAmelCase : List[str] = include_in_weight_decay
lowerCAmelCase : Union[str, Any] = exclude_from_weight_decay
@classmethod
def lowerCamelCase__ ( cls : int , UpperCamelCase_ : Optional[Any] ):
lowerCAmelCase : Tuple = {'''WarmUp''': WarmUp}
return super(UpperCamelCase_ , cls ).from_config(UpperCamelCase_ , custom_objects=UpperCamelCase_ )
def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : List[str] , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple ):
super(UpperCamelCase_ , self )._prepare_local(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : Any = tf.constant(
self.weight_decay_rate , name='''adam_weight_decay_rate''' )
def lowerCamelCase__ ( self : int , UpperCamelCase_ : int , UpperCamelCase_ : Any , UpperCamelCase_ : List[str] ):
lowerCAmelCase : Any = self._do_use_weight_decay(var.name )
if do_decay:
return var.assign_sub(
learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['''weight_decay_rate'''] , use_locking=self._use_locking , )
return tf.no_op()
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Tuple=None , **UpperCamelCase_ : List[Any] ):
lowerCAmelCase, lowerCAmelCase : List[Any] = list(zip(*UpperCamelCase_ ) )
return super(UpperCamelCase_ , self ).apply_gradients(zip(UpperCamelCase_ , UpperCamelCase_ ) , name=UpperCamelCase_ , **UpperCamelCase_ )
def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[Any] ):
if apply_state is None:
return self._decayed_lr_t[var_dtype], {}
lowerCAmelCase : Dict = apply_state or {}
lowerCAmelCase : Dict = apply_state.get((var_device, var_dtype) )
if coefficients is None:
lowerCAmelCase : Optional[Any] = self._fallback_apply_state(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : str = coefficients
return coefficients["lr_t"], {"apply_state": apply_state}
def lowerCamelCase__ ( self : Tuple , UpperCamelCase_ : str , UpperCamelCase_ : List[Any] , UpperCamelCase_ : List[str]=None ):
lowerCAmelCase, lowerCAmelCase : Any = self._get_lr(var.device , var.dtype.base_dtype , UpperCamelCase_ )
lowerCAmelCase : List[str] = self._decay_weights_op(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
with tf.control_dependencies([decay] ):
return super(UpperCamelCase_ , self )._resource_apply_dense(UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ )
def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[Any]=None ):
lowerCAmelCase, lowerCAmelCase : Optional[Any] = self._get_lr(var.device , var.dtype.base_dtype , UpperCamelCase_ )
lowerCAmelCase : Tuple = self._decay_weights_op(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
with tf.control_dependencies([decay] ):
return super(UpperCamelCase_ , self )._resource_apply_sparse(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ )
def lowerCamelCase__ ( self : Optional[Any] ):
lowerCAmelCase : str = super().get_config()
config.update({'''weight_decay_rate''': self.weight_decay_rate} )
return config
def lowerCamelCase__ ( self : Tuple , UpperCamelCase_ : List[str] ):
if self.weight_decay_rate == 0:
return False
if self._include_in_weight_decay:
for r in self._include_in_weight_decay:
if re.search(UpperCamelCase_ , UpperCamelCase_ ) is not None:
return True
if self._exclude_from_weight_decay:
for r in self._exclude_from_weight_decay:
if re.search(UpperCamelCase_ , UpperCamelCase_ ) is not None:
return False
return True
class snake_case_( a__ ):
def __init__( self : Any ):
lowerCAmelCase : Any = []
lowerCAmelCase : List[str] = None
@property
def lowerCamelCase__ ( self : List[str] ):
if self._accum_steps is None:
lowerCAmelCase : Optional[Any] = tf.Variable(
tf.constant(0 , dtype=tf.intaa ) , trainable=UpperCamelCase_ , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
return self._accum_steps.value()
@property
def lowerCamelCase__ ( self : Any ):
if not self._gradients:
raise ValueError('''The accumulator should be called first to initialize the gradients''' )
return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
def __call__( self : Optional[Any] , UpperCamelCase_ : List[Any] ):
if not self._gradients:
lowerCAmelCase : Any = self.step # Create the step variable.
self._gradients.extend(
[
tf.Variable(
tf.zeros_like(UpperCamelCase_ ) , trainable=UpperCamelCase_ , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
if gradient is not None
else gradient
for gradient in gradients
] )
if len(UpperCamelCase_ ) != len(self._gradients ):
raise ValueError(F'''Expected {len(self._gradients )} gradients, but got {len(UpperCamelCase_ )}''' )
for accum_gradient, gradient in zip(self._gradients , UpperCamelCase_ ):
if accum_gradient is not None and gradient is not None:
accum_gradient.assign_add(UpperCamelCase_ )
self._accum_steps.assign_add(1 )
def lowerCamelCase__ ( self : Union[str, Any] ):
if not self._gradients:
return
self._accum_steps.assign(0 )
for gradient in self._gradients:
if gradient is not None:
gradient.assign(tf.zeros_like(UpperCamelCase_ ) )
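# A hedged sketch of driving the accumulator above in a training loop; the
# public names (GradientAccumulator, reset) and compute_loss are assumptions:
# accumulator = GradientAccumulator()
# for step, batch in enumerate(batches):
#     with tf.GradientTape() as tape:
#         loss = compute_loss(model, batch)  # hypothetical helper
#     accumulator(tape.gradient(loss, model.trainable_variables))
#     if (step + 1) % accumulation_steps == 0:
#         optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
#         accumulator.reset()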
| 314
| 0
|
"""simple docstring"""
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def _snake_case ( _snake_case : Union[str, Any] , _snake_case : str , _snake_case : int , _snake_case : Union[str, Any] ):
lowerCAmelCase : Optional[int] = multiprocessing.Manager()
lowerCAmelCase : Tuple = manager.list()
lowerCAmelCase : List[Any] = multiprocessing.Process(target=a__ , args=(check_program, result, timeout) )
p.start()
p.join(timeout=timeout + 1 )
if p.is_alive():
p.kill()
if not result:
result.append('''timed out''' )
return {
"task_id": task_id,
"passed": result[0] == "passed",
"result": result[0],
"completion_id": completion_id,
}
def _snake_case ( _snake_case : Any , _snake_case : int , _snake_case : Any ):
with create_tempdir():
# These system calls are needed when cleaning up tempdir.
import os
import shutil
lowerCAmelCase : List[str] = shutil.rmtree
lowerCAmelCase : Union[str, Any] = os.rmdir
lowerCAmelCase : Tuple = os.chdir
# Disable functionalities that can make destructive changes to the test.
reliability_guard()
# Run program.
try:
lowerCAmelCase : List[Any] = {}
with swallow_io():
with time_limit(a__ ):
exec(a__ , a__ )
result.append('''passed''' )
except TimeoutException:
result.append('''timed out''' )
except BaseException as e:
result.append(f'''failed: {e}''' )
# Needed for cleaning up.
lowerCAmelCase : int = rmtree
lowerCAmelCase : Dict = rmdir
lowerCAmelCase : Tuple = chdir
@contextlib.contextmanager
def _snake_case ( _snake_case : str ):
def signal_handler(_snake_case : Any , _snake_case : int ):
raise TimeoutException('''Timed out!''' )
signal.setitimer(signal.ITIMER_REAL , a__ )
signal.signal(signal.SIGALRM , a__ )
try:
yield
finally:
signal.setitimer(signal.ITIMER_REAL , 0 )
@contextlib.contextmanager
def _snake_case ( ):
lowerCAmelCase : Tuple = WriteOnlyStringIO()
with contextlib.redirect_stdout(a__ ):
with contextlib.redirect_stderr(a__ ):
with redirect_stdin(a__ ):
yield
@contextlib.contextmanager
def _snake_case ( ):
with tempfile.TemporaryDirectory() as dirname:
with chdir(a__ ):
yield dirname
class snake_case_( a__ ):
pass
class snake_case_( io.StringIO ):
def lowerCamelCase__ ( self : Optional[int] , *UpperCamelCase_ : List[Any] , **UpperCamelCase_ : Dict ):
raise OSError
def lowerCamelCase__ ( self : str , *UpperCamelCase_ : Any , **UpperCamelCase_ : Optional[Any] ):
raise OSError
def lowerCamelCase__ ( self : Union[str, Any] , *UpperCamelCase_ : Optional[Any] , **UpperCamelCase_ : Optional[Any] ):
raise OSError
def lowerCamelCase__ ( self : Optional[Any] , *UpperCamelCase_ : str , **UpperCamelCase_ : List[Any] ):
return False
class snake_case_( contextlib._RedirectStream ): # type: ignore
__UpperCamelCase = '''stdin'''
@contextlib.contextmanager
def _snake_case ( _snake_case : List[Any] ):
if root == ".":
yield
return
lowerCAmelCase : Dict = os.getcwd()
os.chdir(a__ )
try:
yield
except BaseException as exc:
raise exc
finally:
os.chdir(a__ )
def _snake_case ( _snake_case : List[str]=None ):
if maximum_memory_bytes is not None:
import resource
resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) )
resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) )
if not platform.uname().system == "Darwin":
resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) )
faulthandler.disable()
import builtins
lowerCAmelCase : List[Any] = None
lowerCAmelCase : Dict = None
import os
lowerCAmelCase : Dict = '''1'''
lowerCAmelCase : Dict = None
lowerCAmelCase : str = None
lowerCAmelCase : Dict = None
lowerCAmelCase : str = None
lowerCAmelCase : int = None
lowerCAmelCase : Any = None
lowerCAmelCase : Any = None
lowerCAmelCase : int = None
lowerCAmelCase : Tuple = None
lowerCAmelCase : List[str] = None
lowerCAmelCase : Dict = None
lowerCAmelCase : str = None
lowerCAmelCase : Optional[int] = None
lowerCAmelCase : Dict = None
lowerCAmelCase : Optional[int] = None
lowerCAmelCase : Optional[Any] = None
lowerCAmelCase : int = None
lowerCAmelCase : Optional[Any] = None
lowerCAmelCase : Any = None
lowerCAmelCase : Dict = None
lowerCAmelCase : Optional[Any] = None
lowerCAmelCase : Optional[Any] = None
lowerCAmelCase : Any = None
lowerCAmelCase : int = None
lowerCAmelCase : Optional[int] = None
lowerCAmelCase : Dict = None
lowerCAmelCase : Dict = None
import shutil
lowerCAmelCase : Tuple = None
lowerCAmelCase : Optional[Any] = None
lowerCAmelCase : str = None
import subprocess
lowerCAmelCase : Dict = None # type: ignore
lowerCAmelCase : List[str] = None
import sys
lowerCAmelCase : Union[str, Any] = None
lowerCAmelCase : Dict = None
lowerCAmelCase : str = None
lowerCAmelCase : Dict = None
lowerCAmelCase : List[str] = None
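# A minimal sketch of how the sandbox pieces above compose when checking one
# completion; create_tempdir/swallow_io/time_limit are the upstream names,
# assumed here for the anonymized defs, and the program string is illustrative:
# with create_tempdir():
#     with swallow_io():
#         with time_limit(3.0):
#             exec("assert 1 + 1 == 2", {})  # raises TimeoutException if too slow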
| 369
|
"""simple docstring"""
import collections
import importlib.util
import os
import re
from pathlib import Path
snake_case__ : Union[str, Any] = '''src/transformers'''
# Matches is_xxx_available()
snake_case__ : int = re.compile(R'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
snake_case__ : List[str] = re.compile(R'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
snake_case__ : List[str] = re.compile(R'''\s+"\S*":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
snake_case__ : Optional[Any] = re.compile(R'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
snake_case__ : Union[str, Any] = re.compile(R'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
snake_case__ : Any = re.compile(R'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
snake_case__ : Union[str, Any] = re.compile(R'''^\s+"([^"]+)",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
snake_case__ : Optional[Any] = re.compile(R'''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
snake_case__ : Optional[Any] = re.compile(R'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
snake_case__ : Dict = re.compile(R'''^\s*try:''')
# Catches a line with else:
snake_case__ : int = re.compile(R'''^\s*else:''')
def _snake_case ( _snake_case : Optional[Any] ):
if _re_test_backend.search(_snake_case ) is None:
return None
lowerCAmelCase : Tuple = [b[0] for b in _re_backend.findall(_snake_case )]
backends.sort()
return "_and_".join(_snake_case )
def _snake_case ( _snake_case : Optional[Any] ):
with open(_snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
lowerCAmelCase : int = f.readlines()
lowerCAmelCase : Tuple = 0
while line_index < len(_snake_case ) and not lines[line_index].startswith('''_import_structure = {''' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(_snake_case ):
return None
# First grab the objects without a specific backend in _import_structure
lowerCAmelCase : List[str] = []
while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None:
lowerCAmelCase : List[str] = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(_snake_case ):
lowerCAmelCase : str = _re_one_line_import_struct.search(_snake_case ).groups()[0]
            lowerCAmelCase : Dict = re.findall(R'''\[([^\]]+)\]''' , _snake_case )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] )
line_index += 1
continue
lowerCAmelCase : Tuple = _re_import_struct_key_value.search(_snake_case )
if single_line_import_search is not None:
lowerCAmelCase : str = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(_snake_case ) > 0]
objects.extend(_snake_case )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
line_index += 1
lowerCAmelCase : str = {'''none''': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('''if TYPE_CHECKING''' ):
# If the line is an if not is_backend_available, we grab all objects associated.
lowerCAmelCase : Tuple = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowerCAmelCase : List[Any] = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowerCAmelCase : Union[str, Any] = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ):
lowerCAmelCase : int = lines[line_index]
if _re_import_struct_add_one.search(_snake_case ) is not None:
objects.append(_re_import_struct_add_one.search(_snake_case ).groups()[0] )
elif _re_import_struct_add_many.search(_snake_case ) is not None:
lowerCAmelCase : str = _re_import_struct_add_many.search(_snake_case ).groups()[0].split(''', ''' )
lowerCAmelCase : Dict = [obj[1:-1] for obj in imports if len(_snake_case ) > 0]
objects.extend(_snake_case )
elif _re_between_brackets.search(_snake_case ) is not None:
lowerCAmelCase : Any = _re_between_brackets.search(_snake_case ).groups()[0].split(''', ''' )
lowerCAmelCase : List[str] = [obj[1:-1] for obj in imports if len(_snake_case ) > 0]
objects.extend(_snake_case )
elif _re_quote_object.search(_snake_case ) is not None:
objects.append(_re_quote_object.search(_snake_case ).groups()[0] )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
elif line.startswith(''' ''' * 12 + '''"''' ):
objects.append(line[13:-3] )
line_index += 1
lowerCAmelCase : List[Any] = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
lowerCAmelCase : Optional[Any] = []
while (
line_index < len(_snake_case )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('''else''' )
):
lowerCAmelCase : Optional[Any] = lines[line_index]
lowerCAmelCase : List[Any] = _re_import.search(_snake_case )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 8 ):
objects.append(line[8:-2] )
line_index += 1
lowerCAmelCase : List[str] = {'''none''': objects}
# Let's continue with backend-specific objects
while line_index < len(_snake_case ):
# If the line is an if is_backend_available, we grab all objects associated.
lowerCAmelCase : str = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowerCAmelCase : int = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowerCAmelCase : str = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ):
lowerCAmelCase : Any = lines[line_index]
lowerCAmelCase : Tuple = _re_import.search(_snake_case )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 12 ):
objects.append(line[12:-2] )
line_index += 1
lowerCAmelCase : Optional[Any] = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
'''convert_pytorch_checkpoint_to_tf2''',
'''modeling_flax_pytorch_utils''',
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
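# For orientation, a minimal sketch (not from the repository) of the
# `__init__.py` layout that `parse_init` expects. The names registered in
# `_import_structure` must match the names imported under `TYPE_CHECKING`,
# per backend; "Foo" is a hypothetical module used only for illustration.
#
# _import_structure = {"configuration_foo": ["FooConfig"]}
# try:
#     if not is_torch_available():
#         raise OptionalDependencyNotAvailable()
# except OptionalDependencyNotAvailable:
#     pass
# else:
#     _import_structure["modeling_foo"] = ["FooModel"]
#
# if TYPE_CHECKING:
#     from .configuration_foo import FooConfig
#     try:
#         if not is_torch_available():
#             raise OptionalDependencyNotAvailable()
#     except OptionalDependencyNotAvailable:
#         pass
#     else:
#         from .modeling_foo import FooModel
#
# If "FooModel" appeared on only one side, analyze_results would report
# "FooModel in _import_structure but not in TYPE_HINT." (or the reverse).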
| 314
| 0
|
"""simple docstring"""
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extract some layers of the full BertForMaskedLM or RobertaForMaskedLM for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="bert", choices=["bert"])
    parser.add_argument("--model_name", default="bert-base-uncased", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_bert-base-uncased_0247911.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = "bert"
    else:
        raise ValueError('args.model_type should be "bert".')

    state_dict = model.state_dict()
    compressed_sd = {}

    # Embeddings are copied from the teacher as-is; the target keys follow
    # DistilBertForMaskedLM's parameter naming.
    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"distilbert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]
    for w in ["weight", "bias"]:
        compressed_sd[f"distilbert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]

    # Every other teacher layer (0, 2, 4, 7, 9, 11) initializes one student layer.
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1

    compressed_sd["vocab_projector.weight"] = state_dict["cls.predictions.decoder.weight"]
    compressed_sd["vocab_projector.bias"] = state_dict["cls.predictions.bias"]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"vocab_transform.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"]
            compressed_sd[f"vocab_layer_norm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
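# A sketch of consuming the dump above (illustrative; the 6-layer student
# config is an assumption, and strict=False tolerates the optional
# vocab_transform keys when --vocab_transform was not passed):
#
# from transformers import DistilBertConfig, DistilBertForMaskedLM
# student = DistilBertForMaskedLM(DistilBertConfig(n_layers=6))
# student.load_state_dict(torch.load(args.dump_checkpoint), strict=False)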
| 370
|
"""simple docstring"""
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys(state_dict, expert_idx=None):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        # The original conditions read `if "fc2" and "experts" not in key`, where the
        # literal string is always truthy; the intended check is spelled out here.
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict
def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    # Tie the shared embedding to the decoder token embedding (NllbMoe naming).
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
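# For reference, a sketch of the index file this writes (file names and the
# total_size value are illustrative, not from a real run). `from_pretrained`
# reads this index to locate and load each shard lazily:
#
# pytorch_model.bin.index.json
# {
#   "metadata": {"total_size": 123456789},
#   "weight_map": {
#     "decoder.layers.0.ffn.experts.expert_0.fc1.weight": "pytorch_model-00001-of-00129.bin",
#     "shared.weight": "pytorch_model-00129-of-00129.bin"
#   }
# }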
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--nllb_moe_checkpoint_path''',
default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000''',
type=str,
required=False,
help='''Path to a directory containing a folder per layer. Follows the original Google format.''',
)
parser.add_argument('''--dtype''', default='''float32''', type=str, required=False, help='''dtype of the saved model''')
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b''',
type=str,
required=False,
help='''Path to the output pytorch model.''',
)
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
    config = NllbMoeConfig.from_pretrained(
'''facebook/nllb-200-3.3B''', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print('''Done''')
model.save_pretrained(args.pytorch_dump_folder_path)
| 314
| 0
|
"""simple docstring"""
import math
def proth(number: int) -> int:
    """
    Return the nth Proth number (k * 2^m + 1 with odd k < 2^m).

    >>> proth(1)
    3
    >>> proth(5)
    17
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)

    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # The number of new terms doubles with every block of powers of two.
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

        return proth_list[number - 1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(11):
        value = 0
        try:
            value = proth(number)
except ValueError:
print(f"""ValueError: there is no {number}th Proth number""")
continue
print(f"""The {number}th Proth number: {value}""")
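# A quick cross-check of the table walk above: enumerate Proth numbers
# (k * 2^n + 1 with odd k < 2^n) directly. This helper is illustrative and
# not part of the original script.
def _proth_brute_force(limit: int) -> list:
    found = set()
    n = 1
    while (1 << n) + 1 <= limit:
        for k in range(1, 1 << n, 2):  # k must be odd and smaller than 2**n
            candidate = k * (1 << n) + 1
            if candidate <= limit:
                found.add(candidate)
        n += 1
    return sorted(found)
# _proth_brute_force(60) == [3, 5, 9, 13, 17, 25, 33, 41, 49, 57]
#                        == [proth(i) for i in range(1, 11)]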
| 371
|
"""simple docstring"""
from math import sqrt
def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    status = True

    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"

    return status


def sieve_er(n):
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))
    ans = []  # this list will be returns.

    # actual sieve of erathostenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans


def get_prime_numbers(n):
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    ans = []

    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans


def prime_factorization(number):
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"

    ans = []  # this list will be returns of the function.

    # potential prime number factors.
    factor = 2
    quotient = number

    if number == 0 or number == 1:
        ans.append(number)

    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient //= factor  # integer division keeps the quotient an int
            else:
                factor += 1

    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans
def greatest_prime_factor(number):
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"

    return ans


def smallest_prime_factor(number):
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"

    return ans


def is_even(number):
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare must been from type bool"

    return number % 2 == 0


def is_odd(number):
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare must been from type bool"

    return number % 2 != 0


def goldbach(number):
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"

    ans = []  # this list will returned

    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    # run variable for while-loops.
    i = 0
    j = None

    # exit variable. for break up the loops
    loop = True

    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1

    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"

    return ans
def gcd(number1, number2):
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."

    rest = 0

    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must been from type int and positive"

    return number1


def kg_v(number1, number2):
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."

    ans = 1  # actual answer that will be return.

    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)

    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)

    count1 = 0
    count2 = 0

    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'

    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)

                for _ in range(max(count1, count2)):
                    ans *= n

            else:
                count1 = prime_fac_1.count(n)

                for _ in range(count1):
                    ans *= n

            done.append(n)

    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)

            for _ in range(count2):
                ans *= n

            done.append(n)

    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"

    return ans
def get_prime(n):
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"

    index = 0
    ans = 2  # this variable holds the answer

    while index < n:
        index += 1

        ans += 1  # counts to the next number

        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1

    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must been a prime number and from type int"

    return ans


def get_primes_between(p_number_1, p_number_2):
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"

    number = p_number_1 + 1  # jump to the next number

    ans = []  # this list will be returns.

    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1

    while number < p_number_2:
        ans.append(number)

        number += 1

        # fetch the next prime number.
        while not is_prime(number):
            number += 1

    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"

    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans


def get_divisors(n):
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"

    ans = []  # will be returned.

    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"

    return ans


def is_perfect_number(number):
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"

    divisors = get_divisors(number)

    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"

    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number
def simplify_fraction(numerator, denominator):
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"

    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n):
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"

    ans = 1  # this will be return.

    for factor in range(1, n + 1):
        ans *= factor

    return ans


def fib(n):
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"

    tmp = 0
    fib_1 = 1
    ans = 1  # this will be return

    for _ in range(n - 1):
        tmp = ans
        ans += fib_1
        fib_1 = tmp

    return ans
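# A few illustrative checks for the helpers above (added for this edit, not
# part of the original module); they run only when the file is executed directly:
if __name__ == "__main__":
    assert get_prime_numbers(20) == [2, 3, 5, 7, 11, 13, 17, 19]
    assert prime_factorization(360) == [2, 2, 2, 3, 3, 5]
    assert goldbach(28) == [5, 23]  # first prime pair summing to 28
    assert gcd(24, 36) == 12 and kg_v(24, 36) == 72
    assert is_perfect_number(28) and simplify_fraction(10, 20) == (1, 2)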
| 314
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None

snake_case__ = logging.get_logger(__name__)
logger = snake_case__

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"
class CamembertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CamembertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        **kwargs,
    ):
        # The mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
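# A short usage sketch (requires downloading the "camembert-base" checkpoint;
# the token ids 10/11/20/21 are placeholders):
#
# tok = CamembertTokenizerFast.from_pretrained("camembert-base")
# ids = tok.build_inputs_with_special_tokens([10, 11], [20, 21])
# CamemBERT lays out sequence pairs as `<s> A </s></s> B </s>`, so `ids` is
# [cls_id, 10, 11, sep_id, sep_id, 20, 21, sep_id], and
# create_token_type_ids_from_sequences returns all zeros for both segments.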
| 350
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
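# Quick illustration of what the defaults imply (added here, not part of the
# original file): the encoder sees (image_size // patch_size) ** 2 patch
# tokens per image, each projected to hidden_size dimensions.
if __name__ == "__main__":
    config = ViTMSNConfig()
    assert (config.image_size // config.patch_size) ** 2 == 196  # 224/16 = 14 per side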
| 314
| 0
|
"""simple docstring"""
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_boto3_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_boto3_available():
    import boto3  # noqa: F401
def _create_iam_role_for_sagemaker(role_name):
    iam_client = boto3.client("iam")

    sagemaker_trust_policy = {
        "Version": "2012-10-17",
        "Statement": [
            {"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"}
        ],
    }
    try:
        # create the role, associated with the chosen trust policy
        iam_client.create_role(
            RoleName=role_name, AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy, indent=2)
        )
        policy_document = {
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Effect": "Allow",
                    "Action": [
                        "sagemaker:*",
                        "ecr:GetDownloadUrlForLayer",
                        "ecr:BatchGetImage",
                        "ecr:BatchCheckLayerAvailability",
                        "ecr:GetAuthorizationToken",
                        "cloudwatch:PutMetricData",
                        "cloudwatch:GetMetricData",
                        "cloudwatch:GetMetricStatistics",
                        "cloudwatch:ListMetrics",
                        "logs:CreateLogGroup",
                        "logs:CreateLogStream",
                        "logs:DescribeLogStreams",
                        "logs:PutLogEvents",
                        "logs:GetLogEvents",
                        "s3:CreateBucket",
                        "s3:ListBucket",
                        "s3:GetBucketLocation",
                        "s3:GetObject",
                        "s3:PutObject",
                    ],
                    "Resource": "*",
                }
            ],
        }
        # attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name,
            PolicyName=f"{role_name}_policy_permission",
            PolicyDocument=json.dumps(policy_document, indent=2),
        )
    except iam_client.exceptions.EntityAlreadyExistsException:
        print(f"role {role_name} already exists. Using existing one")
def _get_iam_role_arn(role_name):
    iam_client = boto3.client("iam")
    return iam_client.get_role(RoleName=role_name)["Role"]["Arn"]
def get_sagemaker_input():
    credentials_configuration = _ask_options(
        "How do you want to authorize?",
        ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "],
        int,
    )
    aws_profile = None
    if credentials_configuration == 0:
        aws_profile = _ask_field("Enter your AWS Profile name: [default] ", default="default")
        os.environ["AWS_PROFILE"] = aws_profile
    else:
        print(
            "Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"
            "`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`"
        )
        aws_access_key_id = _ask_field("AWS Access Key ID: ")
        os.environ["AWS_ACCESS_KEY_ID"] = aws_access_key_id

        aws_secret_access_key = _ask_field("AWS Secret Access Key: ")
        os.environ["AWS_SECRET_ACCESS_KEY"] = aws_secret_access_key

    aws_region = _ask_field("Enter your AWS Region: [us-east-1]", default="us-east-1")
    os.environ["AWS_DEFAULT_REGION"] = aws_region

    role_management = _ask_options(
        "Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?",
        ["Provide IAM Role name", "Create new IAM role using credentials"],
        int,
    )
    if role_management == 0:
        iam_role_name = _ask_field("Enter your IAM role name: ")
    else:
        iam_role_name = "accelerate_sagemaker_execution_role"
        print(f'Accelerate will create an iam role "{iam_role_name}" using the provided credentials')
        _create_iam_role_for_sagemaker(iam_role_name)

    is_custom_docker_image = _ask_field(
        "Do you want to use custom Docker image? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    docker_image = None
    if is_custom_docker_image:
        docker_image = _ask_field("Enter your Docker image: ", lambda x: str(x).lower())

    is_sagemaker_inputs_enabled = _ask_field(
        "Do you want to provide SageMaker input channels with data locations? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_inputs_file = None
    if is_sagemaker_inputs_enabled:
        sagemaker_inputs_file = _ask_field(
            "Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ",
            lambda x: str(x).lower(),
        )

    is_sagemaker_metrics_enabled = _ask_field(
        "Do you want to enable SageMaker metrics? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_metrics_file = None
    if is_sagemaker_metrics_enabled:
        sagemaker_metrics_file = _ask_field(
            "Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ",
            lambda x: str(x).lower(),
        )

    distributed_type = _ask_options(
        "What is the distributed mode?",
        ["No distributed training", "Data parallelism"],
        _convert_sagemaker_distributed_mode,
    )
    dynamo_config = {}
    use_dynamo = _ask_field(
        "Do you wish to optimize your script with torch dynamo?[yes/NO]:",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    if use_dynamo:
        prefix = "dynamo_"
        dynamo_config[prefix + "backend"] = _ask_options(
            "Which dynamo backend would you like to use?",
            [x.lower() for x in DYNAMO_BACKENDS],
            _convert_dynamo_backend,
            default=2,
        )
        use_custom_options = _ask_field(
            "Do you want to customize the defaults sent to torch.compile? [yes/NO]: ",
            _convert_yes_no_to_bool,
            default=False,
            error_message="Please enter yes or no.",
        )
        if use_custom_options:
            dynamo_config[prefix + "mode"] = _ask_options(
                "Which mode do you want to use?",
                TORCH_DYNAMO_MODES,
                lambda x: TORCH_DYNAMO_MODES[int(x)],
                default="default",
            )
            dynamo_config[prefix + "use_fullgraph"] = _ask_field(
                "Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )
            dynamo_config[prefix + "use_dynamic"] = _ask_field(
                "Do you want to enable dynamic shape tracing? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )

    ec2_instance_query = "Which EC2 instance type you want to use for your training?"
    if distributed_type != SageMakerDistributedType.NO:
        ec2_instance_type = _ask_options(
            ec2_instance_query, SAGEMAKER_PARALLEL_EC2_INSTANCES, lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x)]
        )
    else:
        ec2_instance_query += "? [ml.p3.2xlarge]:"
        ec2_instance_type = _ask_field(ec2_instance_query, lambda x: str(x).lower(), default="ml.p3.2xlarge")

    num_machines = 1
    if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
        num_machines = _ask_field(
            "How many machines do you want use? [1]: ",
            int,
            default=1,
        )

    mixed_precision = _ask_options(
        "Do you wish to use FP16 or BF16 (mixed precision)?",
        ["no", "fp16", "bf16", "fp8"],
        _convert_mixed_precision,
    )

    if use_dynamo and mixed_precision == "no":
        print(
            "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts."
        )

    return SageMakerConfig(
        image_uri=docker_image,
        compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,
        distributed_type=distributed_type,
        use_cpu=False,
        dynamo_config=dynamo_config,
        ec2_instance_type=ec2_instance_type,
        profile=aws_profile,
        region=aws_region,
        iam_role_name=iam_role_name,
        mixed_precision=mixed_precision,
        num_machines=num_machines,
        sagemaker_inputs_file=sagemaker_inputs_file,
        sagemaker_metrics_file=sagemaker_metrics_file,
    )
| 351
|
"""simple docstring"""
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
logger = logging.getLogger(__name__)
def git_log(folder_path: str):
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
    }

    with open(os.path.join(folder_path, "git_log.json"), "w") as f:
        json.dump(repo_infos, f, indent=4)
def init_gpu_params(params):
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
return
assert torch.cuda.is_available()
logger.info('''Initializing GPUs''' )
if params.n_gpu > 1:
assert params.local_rank != -1
        params.world_size = int(os.environ["WORLD_SIZE"])
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"])
        params.global_rank = int(os.environ["RANK"])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True
assert params.n_nodes == int(os.environ['''N_NODES'''] )
assert params.node_id == int(os.environ['''NODE_RANK'''] )
# local job (single GPU)
else:
assert params.local_rank == -1
        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    PREFIX = f"--- Global rank: {params.global_rank} - "
logger.info(PREFIX + '''Number of nodes: %i''' % params.n_nodes )
logger.info(PREFIX + '''Node ID : %i''' % params.node_id )
logger.info(PREFIX + '''Local rank : %i''' % params.local_rank )
logger.info(PREFIX + '''World size : %i''' % params.world_size )
logger.info(PREFIX + '''GPUs per node : %i''' % params.n_gpu_per_node )
logger.info(PREFIX + '''Master : %s''' % str(params.is_master ) )
logger.info(PREFIX + '''Multi-node : %s''' % str(params.multi_node ) )
logger.info(PREFIX + '''Multi-GPU : %s''' % str(params.multi_gpu ) )
logger.info(PREFIX + '''Hostname : %s''' % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info('''Initializing PyTorch distributed''' )
torch.distributed.init_process_group(
init_method='''env://''' , backend='''nccl''' , )
def set_seed(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
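# A minimal sketch of driving init_gpu_params for a local single-GPU run
# (illustrative; executing it requires CUDA). In the multi-GPU branch the
# launcher is expected to export WORLD_SIZE, N_GPU_NODE, RANK, N_NODES and
# NODE_RANK before the script starts.
#
# from types import SimpleNamespace
# params = SimpleNamespace(n_gpu=1, local_rank=-1)
# init_gpu_params(params)
# # afterwards: params.world_size == 1, params.node_id == 0, params.is_master is True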
| 314
| 0
|
"""simple docstring"""
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
| 352
|
"""simple docstring"""
def sylvester(number: int) -> int:
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"

    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1
if __name__ == "__main__":
print(f"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
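# Worked values for reference: the recurrence a(n) = (a(n-1) - 1) * a(n-1) + 1
# (equivalently a(n-1)^2 - a(n-1) + 1) produces
#   sylvester(1) = 2
#   sylvester(2) = 1 * 2 + 1 = 3
#   sylvester(3) = 2 * 3 + 1 = 7
#   sylvester(4) = 6 * 7 + 1 = 43
#   sylvester(5) = 42 * 43 + 1 = 1807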
| 314
| 0
|
"""simple docstring"""
import os
def solution(filename: str = "matrix.txt") -> int:
    with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), filename)) as in_file:
        data = in_file.read()

    grid = [[int(cell) for cell in row.split(",")] for row in data.strip().splitlines()]
    n = len(grid[0])
    dp = [[0 for i in range(n)] for j in range(n)]
    dp[0][0] = grid[0][0]

    for i in range(1, n):
        dp[0][i] = grid[0][i] + dp[0][i - 1]
    for i in range(1, n):
        dp[i][0] = grid[i][0] + dp[i - 1][0]

    for i in range(1, n):
        for j in range(1, n):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])

    return dp[-1][-1]
if __name__ == "__main__":
print(f"""{solution() = }""")
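# Sanity check of the same recurrence on an in-memory grid (the classic small
# example, not Project Euler data); added for illustration:
def _min_path_sum(grid):
    n = len(grid)
    dp = [row[:] for row in grid]
    for i in range(1, n):
        dp[0][i] += dp[0][i - 1]  # first row: only moves from the left
        dp[i][0] += dp[i - 1][0]  # first column: only moves from above
    for i in range(1, n):
        for j in range(1, n):
            dp[i][j] += min(dp[i - 1][j], dp[i][j - 1])
    return dp[-1][-1]


assert _min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7  # path 1-3-1-1-1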
| 353
|
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def get_swin_config(model_name):
    config = SwinConfig(image_size=192)

    if "base" in model_name:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError("Model not supported, only supports base and large variants")

    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads

    return config
def rename_key(name):
    if "encoder.mask_token" in name:
        name = name.replace("encoder.mask_token", "embeddings.mask_token")
    if "encoder.patch_embed.proj" in name:
        name = name.replace("encoder.patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "encoder.patch_embed.norm" in name:
        name = name.replace("encoder.patch_embed.norm", "embeddings.norm")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "encoder.norm.weight":
        name = "layernorm.weight"
    if name == "encoder.norm.bias":
        name = "layernorm.bias"

    if "decoder" in name:
        pass
    else:
        name = "swin." + name

    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            block_num = int(key_split[4])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            # The fused qkv projection is split into separate query/key/value tensors.
            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_swin_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    config = get_swin_config(model_name)
    model = SwinForMaskedImageModeling(config)
    model.eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = ViTImageProcessor(size={"height": 192, "width": 192})
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)
print(outputs.keys() )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_snake_case )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_snake_case )
if push_to_hub:
print(f'''Pushing model and image processor for {model_name} to hub''' )
model.push_to_hub(f'''microsoft/{model_name}''' )
image_processor.push_to_hub(f'''microsoft/{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''swin-base-simmim-window6-192''',
type=str,
choices=['''swin-base-simmim-window6-192''', '''swin-large-simmim-window12-192'''],
help='''Name of the Swin SimMIM model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''',
default='''/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth''',
type=str,
help='''Path to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
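# Illustration of the fused-qkv split performed in convert_state_dict, on a
# dummy tensor (shapes are made up for the demo, not from a real checkpoint):
# a fused (3*dim, dim) projection is cut into equal query/key/value blocks
# along dimension 0.
def _demo_qkv_split(dim: int = 4):
    fused = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
    q, k, v = fused[:dim, :], fused[dim : dim * 2, :], fused[-dim:, :]
    # Concatenating the three blocks back recovers the fused matrix.
    assert torch.equal(torch.cat([q, k, v], dim=0), fused)
# _demo_qkv_split()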
| 314
| 0
|
"""simple docstring"""
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def make_transparency_mask(size, overlap_pixels, remove_borders=[]):
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode="linear_ramp", pad_width=overlap_pixels, end_values=0)

    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask
def clamp(n, smallest, largest):
    return max(smallest, min(n, largest))


def clamp_rect(rect: [int], min: [int], max: [int]):
    return (
        clamp(rect[0], min[0], max[0]),
        clamp(rect[1], min[1], max[1]),
        clamp(rect[2], min[0], max[0]),
        clamp(rect[3], min[1], max[1]),
    )


def add_overlap_rect(rect: [int], overlap: int, image_size: [int]):
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect


def squeeze_tile(tile, original_image, original_slice, slice_x):
    result = Image.new("RGB", (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])
        ),
        (0, 0),
    )
    result.paste(tile, (original_slice, 0))
    return result


def unsqueeze_tile(tile, original_image_slice):
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile


def next_divisible(n, d):
    divisor = n % d
    return n - divisor
class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        low_res_scheduler: DDPMScheduler,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        max_noise_level: int = 350,
    ):
        super().__init__(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            max_noise_level=max_noise_level,
        )

    def _process_tile(self, original_image_slice, x, y, tile_size, tile_border, image, final_image, **kwargs):
        torch.manual_seed(0)
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice), x * tile_size),
            min(image.size[1] - (tile_size + original_image_slice), y * tile_size),
            min(image.size[0], (x + 1) * tile_size),
            min(image.size[1], (y + 1) * tile_size),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect, tile_border, image.size)
        tile = image.crop(crop_rect_with_overlap)
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0, translated_slice_x)
        to_input = squeeze_tile(tile, image, original_image_slice, translated_slice_x)
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size), Image.BICUBIC)
        upscaled_tile = super(StableDiffusionTiledUpscalePipeline, self).__call__(image=to_input, **kwargs).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC)
        upscaled_tile = unsqueeze_tile(upscaled_tile, original_image_slice)
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC)
        remove_borders = []
        if x == 0:
            remove_borders.append("l")
        elif crop_rect[2] == image.size[0]:
            remove_borders.append("r")
        if y == 0:
            remove_borders.append("t")
        elif crop_rect[3] == image.size[1]:
            remove_borders.append("b")

        transparency_mask = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=remove_borders
            ),
            mode="L",
        )
        final_image.paste(
            upscaled_tile, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), transparency_mask
        )
@torch.no_grad()
def __call__( self : int , UpperCamelCase_ : Union[str, List[str]] , UpperCamelCase_ : Union[PIL.Image.Image, List[PIL.Image.Image]] , UpperCamelCase_ : int = 7_5 , UpperCamelCase_ : float = 9.0 , UpperCamelCase_ : int = 5_0 , UpperCamelCase_ : Optional[Union[str, List[str]]] = None , UpperCamelCase_ : Optional[int] = 1 , UpperCamelCase_ : float = 0.0 , UpperCamelCase_ : Optional[torch.Generator] = None , UpperCamelCase_ : Optional[torch.FloatTensor] = None , UpperCamelCase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCamelCase_ : int = 1 , UpperCamelCase_ : int = 1_2_8 , UpperCamelCase_ : int = 3_2 , UpperCamelCase_ : int = 3_2 , ):
lowerCAmelCase : List[Any] = Image.new('''RGB''' , (image.size[0] * 4, image.size[1] * 4) )
lowerCAmelCase : List[Any] = math.ceil(image.size[0] / tile_size )
lowerCAmelCase : Union[str, Any] = math.ceil(image.size[1] / tile_size )
lowerCAmelCase : Any = tcx * tcy
lowerCAmelCase : str = 0
for y in range(UpperCamelCase_ ):
for x in range(UpperCamelCase_ ):
self._process_tile(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , prompt=UpperCamelCase_ , num_inference_steps=UpperCamelCase_ , guidance_scale=UpperCamelCase_ , noise_level=UpperCamelCase_ , negative_prompt=UpperCamelCase_ , num_images_per_prompt=UpperCamelCase_ , eta=UpperCamelCase_ , generator=UpperCamelCase_ , latents=UpperCamelCase_ , )
current_count += 1
if callback is not None:
callback({'''progress''': current_count / total_tile_count, '''image''': final_image} )
return final_image
def main():
    # Run a demo
    model_id = "stabilityai/stable-diffusion-x4-upscaler"
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = Image.open("../../docs/source/imgs/diffusers_library.jpg")

    def callback(obj):
        print(f"progress: {obj['progress']:.4f}")
        obj["image"].save("diffusers_library_progress.jpg")

    final_image = pipe(image=image, prompt="Black font, white background, vector", noise_level=40, callback=callback)
    final_image.save("diffusers_library.jpg")


if __name__ == "__main__":
    main()
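# Standalone sketch (not part of the original pipeline) of the tiling arithmetic
# used by __call__/_process_tile above; the image size is an arbitrary
# illustrative value.
def _sketch_tile_grid(image_size=(512, 384), tile_size=128):
    import math

    tcx = math.ceil(image_size[0] / tile_size)  # tiles along x
    tcy = math.ceil(image_size[1] / tile_size)  # tiles along y
    rects = [
        (x * tile_size, y * tile_size, min(image_size[0], (x + 1) * tile_size), min(image_size[1], (y + 1) * tile_size))
        for y in range(tcy)
        for x in range(tcx)
    ]
    return tcx * tcy, rects  # (12, [...]) for a 512x384 input with 128px tiles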
| 354
|
"""simple docstring"""
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNet2DModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def _preprocess_image(image: Union[List, PIL.Image.Image, torch.Tensor]):
    warnings.warn(
        "The preprocess method is deprecated and will be removed in a future version. Please"
        " use VaeImageProcessor.preprocess instead",
        FutureWarning,
    )
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    if isinstance(image[0], PIL.Image.Image):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def _preprocess_mask(mask: Union[List, PIL.Image.Image, torch.Tensor]):
    if isinstance(mask, torch.Tensor):
        return mask
    elif isinstance(mask, PIL.Image.Image):
        mask = [mask]
    if isinstance(mask[0], PIL.Image.Image):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert("L").resize((w, h), resample=PIL_INTERPOLATION["nearest"]))[None, :] for m in mask]
        mask = np.concatenate(mask, axis=0)
        mask = mask.astype(np.float32) / 255.0
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask)
    elif isinstance(mask[0], torch.Tensor):
        mask = torch.cat(mask, dim=0)
    return mask
class RePaintPipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: RePaintScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image],
        mask_image: Union[torch.Tensor, PIL.Image.Image],
        num_inference_steps: int = 250,
        eta: float = 0.0,
        jump_length: int = 10,
        jump_n_sample: int = 10,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        original_image = image
        original_image = _preprocess_image(original_image)
        original_image = original_image.to(device=self.device, dtype=self.unet.dtype)
        mask_image = _preprocess_mask(mask_image)
        mask_image = mask_image.to(device=self.device, dtype=self.unet.dtype)
        batch_size = original_image.shape[0]
        # sample gaussian noise to begin the loop
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )
        image_shape = original_image.shape
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)
        # set step values
        self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, self.device)
        self.scheduler.eta = eta
        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator, list) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image, t).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image, t_last, generator)
            t_last = t
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
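# Self-contained sketch of the mask convention enforced by _preprocess_mask
# above: after thresholding, zeros mark the region RePaint will regenerate and
# ones mark pixels kept from the original image. Illustrative values only.
def _sketch_mask_threshold():
    mask = np.array([[0.2, 0.6], [0.49, 0.51]], dtype=np.float32)
    mask[mask < 0.5] = 0
    mask[mask >= 0.5] = 1
    return mask  # [[0., 1.], [0., 1.]]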
| 314
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
    FNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model",
},
"tokenizer_file": {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/fnet-base": 512,
"google/fnet-large": 512,
}
SPIECE_UNDERLINE = "▁"
class FNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "token_type_ids"]
    slow_tokenizer_class = FNetTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=False, remove_space=True,
                 keep_accents=True, unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>",
                 cls_token="[CLS]", mask_token="[MASK]", **kwargs):
        # Mask token behaves like a normal word: include the space before it and do not strip it
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space,
            keep_accents=keep_accents, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token,
            cls_token=cls_token, mask_token=mask_token, **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
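# Illustration (not part of the original module) of the layouts produced above:
#   single sequence: [CLS] A... [SEP]            -> token_type_ids all 0
#   sequence pair:   [CLS] A... [SEP] B... [SEP] -> 0s for segment A, 1s for segment B
def _sketch_pair_layout(cls_id=2, sep_id=3, ids_a=(10, 11), ids_b=(20,)):
    input_ids = [cls_id, *ids_a, sep_id, *ids_b, sep_id]
    token_type_ids = [0] * (len(ids_a) + 2) + [1] * (len(ids_b) + 1)
    return input_ids, token_type_ids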
| 355
|
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)

    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)

    def test_text_streamer_decode_kwargs(self):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be
        # tested with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1
        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
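# Hedged usage sketch of TextIteratorStreamer outside a test harness: generation
# runs on a worker thread while the main thread consumes text chunks as they are
# produced. The tiny checkpoint id mirrors the tests above and is interchangeable.
def _sketch_streaming_generation():
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
    model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
    inputs = tokenizer(["Hello"], return_tensors="pt")
    streamer = TextIteratorStreamer(tokenizer)
    Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 8, "streamer": streamer}).start()
    return "".join(chunk for chunk in streamer)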
| 314
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"],
    "configuration_maskformer_swin": ["MaskFormerSwinConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_maskformer"] = ["MaskFormerFeatureExtractor"]
    _import_structure["image_processing_maskformer"] = ["MaskFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_maskformer"] = [
        "MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MaskFormerForInstanceSegmentation",
        "MaskFormerModel",
        "MaskFormerPreTrainedModel",
    ]
    _import_structure["modeling_maskformer_swin"] = [
        "MaskFormerSwinBackbone",
        "MaskFormerSwinModel",
        "MaskFormerSwinPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
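# Illustrative only: with the lazy structure above, the submodule import is
# deferred until first attribute access (assumes transformers is installed).
def _sketch_lazy_access():
    import importlib

    maskformer = importlib.import_module("transformers.models.maskformer")
    return maskformer.MaskFormerConfig  # first access triggers the real import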
| 356
|
"""simple docstring"""
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
torch.backends.cuda.matmul.allow_tf32 = False


class TrainingTests(unittest.TestCase):
    def get_model_optimizer(self, resolution=32):
        set_seed(0)
        model = UNet2DModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer

    @slow
    def test_training_step_equality(self):
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear", clip_sample=True,
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear", clip_sample=True,
        )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1000, (4,)).long().to(device) for _ in range(4)]
        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer
        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer
        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
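# Background sketch (not the diffusers implementation): both schedulers share
# the closed-form forward process x_t = sqrt(a_bar_t) * x_0 + sqrt(1 - a_bar_t) * eps,
# which is why add_noise and the resulting training steps match between DDPM and DDIM.
def _sketch_add_noise(x0, eps, t, betas):
    alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
    a_bar = alphas_cumprod[t]
    return a_bar.sqrt() * x0 + (1.0 - a_bar).sqrt() * eps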
| 314
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
        '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json'''
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json''',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    model_type = "realm"

    def __init__(self, vocab_size=30522, hidden_size=768, retriever_proj_size=128, num_hidden_layers=12,
                 num_attention_heads=12, num_candidates=8, intermediate_size=3072, hidden_act="gelu_new",
                 hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
                 type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, span_hidden_size=256,
                 max_span_width=10, reader_layer_norm_eps=1e-3, reader_beam_size=5, reader_seq_len=320,
                 num_block_records=13353718, searcher_beam_size=5000, pad_token_id=1, bos_token_id=0,
                 eos_token_id=2, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len
        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
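# Hedged usage sketch: constructing the config and overriding one retrieval
# hyperparameter; the values in the trailing comment are this class's own defaults.
def _sketch_realm_config_usage():
    config = RealmConfig(searcher_beam_size=1000)
    return config.hidden_size, config.num_candidates, config.searcher_beam_size  # (768, 8, 1000)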
| 357
|
"""simple docstring"""
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)
# Name and base inferred from the structure (a CLIP-based NSFW/watermark
# checker); the original class name is not recoverable from this snippet.
class IFSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)
        self.vision_model = CLIPVisionModelWithProjection(config.vision_config)
        self.p_head = nn.Linear(config.vision_config.projection_dim, 1)
        self.w_head = nn.Linear(config.vision_config.projection_dim, 1)

    @torch.no_grad()
    def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5):
        image_embeds = self.vision_model(clip_input)[0]
        nsfw_detected = self.p_head(image_embeds)
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()
        if any(nsfw_detected):
            logger.warning(
                "Potential NSFW content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )
            for idx, nsfw_detected_ in enumerate(nsfw_detected):
                if nsfw_detected_:
                    images[idx] = np.zeros(images[idx].shape)
        watermark_detected = self.w_head(image_embeds)
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()
        if any(watermark_detected):
            logger.warning(
                "Potential watermarked content was detected in one or more images. A black image will be returned"
                " instead. Try again with a different prompt and/or seed."
            )
            for idx, watermark_detected_ in enumerate(watermark_detected):
                if watermark_detected_:
                    images[idx] = np.zeros(images[idx].shape)
        return images, nsfw_detected, watermark_detected
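# Self-contained sketch of the blanking rule applied above: any flagged image
# is replaced by a zero array of the same shape. Illustrative only.
def _sketch_blank_flagged(images, flags):
    return [np.zeros(img.shape) if flagged else img for img, flagged in zip(images, flags)]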
| 314
| 0
|
"""simple docstring"""
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None
    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (change / current_temp)  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state
    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state
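# The acceptance rule above is the Metropolis criterion: a worsening move
# (change <= 0) is taken with probability e^(change / T), so a hot system
# accepts almost anything while T -> 0 degenerates into plain hill climbing.
def _metropolis_acceptance(change: float, current_temp: float) -> float:
    return 1.0 if change > 0 else math.e ** (change / current_temp)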
if __name__ == "__main__":

    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_min.score()}"
    )
    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_max.score()}"
    )

    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_min.score()}"
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_max.score()}"
    )
| 358
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/vocab.txt''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/vocab.txt''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'''
),
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'''
),
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt''',
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'''
),
'''bert-base-multilingual-cased''': (
'''https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-cased''': (
'''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''bert-base-uncased''': 512,
'''bert-large-uncased''': 512,
'''bert-base-cased''': 512,
'''bert-large-cased''': 512,
'''bert-base-multilingual-uncased''': 512,
'''bert-base-multilingual-cased''': 512,
'''bert-base-chinese''': 512,
'''bert-base-german-cased''': 512,
'''bert-large-uncased-whole-word-masking''': 512,
'''bert-large-cased-whole-word-masking''': 512,
'''bert-large-uncased-whole-word-masking-finetuned-squad''': 512,
'''bert-large-cased-whole-word-masking-finetuned-squad''': 512,
'''bert-base-cased-finetuned-mrpc''': 512,
'''bert-base-german-dbmdz-cased''': 512,
'''bert-base-german-dbmdz-uncased''': 512,
'''TurkuNLP/bert-base-finnish-cased-v1''': 512,
'''TurkuNLP/bert-base-finnish-uncased-v1''': 512,
'''wietsedv/bert-base-dutch-cased''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''bert-base-uncased''': {'''do_lower_case''': True},
'''bert-large-uncased''': {'''do_lower_case''': True},
'''bert-base-cased''': {'''do_lower_case''': False},
'''bert-large-cased''': {'''do_lower_case''': False},
'''bert-base-multilingual-uncased''': {'''do_lower_case''': True},
'''bert-base-multilingual-cased''': {'''do_lower_case''': False},
'''bert-base-chinese''': {'''do_lower_case''': False},
'''bert-base-german-cased''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': False},
'''bert-base-cased-finetuned-mrpc''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-cased''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-uncased''': {'''do_lower_case''': True},
'''TurkuNLP/bert-base-finnish-cased-v1''': {'''do_lower_case''': False},
'''TurkuNLP/bert-base-finnish-uncased-v1''': {'''do_lower_case''': True},
'''wietsedv/bert-base-dutch-cased''': {'''do_lower_case''': False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]",
                 sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]",
                 tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
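# Illustrative check (assuming a BertNormalizer state) of the re-configuration
# done in __init__ above: the serialized normalizer is a JSON dict whose "type"
# key selects the tokenizers normalizer class.
def _sketch_rebuild_normalizer(state=None):
    state = dict(state or {"type": "BertNormalizer", "lowercase": True})
    normalizer_class = getattr(normalizers, state.pop("type"))
    return normalizer_class(**state)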
| 314
| 0
|
"""simple docstring"""
import sys
N = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def solution(n: str = N) -> int:
    """Return the greatest product of thirteen adjacent digits in n."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product
if __name__ == "__main__":
print(f"""{solution() = }""")
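# Equivalent alternative sketch using math.prod over a sliding window; shown
# only for comparison with the explicit loops above.
def solution_prod(n: str = N) -> int:
    from math import prod

    return max(prod(int(digit) for digit in n[i : i + 13]) for i in range(len(n) - 12))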
| 359
|
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True, prediction_type=prediction_type, sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            #     sample = pred_prev_sample + variance
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        scheduler_timesteps = scheduler.timesteps
        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()
            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
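# Hedged usage sketch of the custom-timesteps API exercised by the tests above:
# an explicit descending schedule replaces num_inference_steps.
def _sketch_custom_timesteps():
    scheduler = DDPMScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(timesteps=[999, 500, 250, 100, 0])
    return scheduler.timesteps  # tensor([999, 500, 250, 100, 0])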
| 314
| 0
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
# Concrete class names are not recoverable from this snippet; these follow the
# transformers dummy-object convention for the "speech" backend.
class ASTFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])


class Speech2TextFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])
| 360
|
"""simple docstring"""
def solution(limit: int = 50000000) -> int:
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))

    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    for prime_1 in primes:
        square = prime_1 * prime_1
        for prime_2 in primes:
            cube = prime_2 * prime_2 * prime_2
            if square + cube >= limit - 16:
                break
            for prime_3 in primes:
                tetr = prime_3 * prime_3 * prime_3 * prime_3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)

    return len(ret)
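# Orientation from the problem statement: the only numbers below fifty
# expressible as p^2 + q^3 + r^4 are 28, 33, 47 and 49, so solution(50) == 4.
def _sketch_statement_examples():
    return sorted({2**2 + 2**3 + 2**4, 3**2 + 2**3 + 2**4, 2**2 + 3**3 + 2**4, 5**2 + 2**3 + 2**4})  # [28, 33, 47, 49]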
if __name__ == "__main__":
print(f"""{solution() = }""")
| 314
| 0
|
"""simple docstring"""
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device: Union[str, torch.device] = None):
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )
        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
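# The update in step_pred is one Euler-Maruyama step of the reverse-time VP SDE,
# x <- x + (f(x, t) - g(t)^2 * score) * dt + g(t) * sqrt(|dt|) * z. Scalar sketch
# with illustrative float arguments (the scheduler itself works on tensors):
def _sketch_euler_maruyama(x, score, beta_t, dt, noise):
    drift = -0.5 * beta_t * x - beta_t * score  # f(x, t) - g(t)^2 * score, with g(t)^2 = beta_t
    x_mean = x + drift * dt
    return x_mean + math.sqrt(beta_t) * math.sqrt(-dt) * noise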
| 361
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"],
    "configuration_maskformer_swin": ["MaskFormerSwinConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_maskformer"] = ["MaskFormerFeatureExtractor"]
    _import_structure["image_processing_maskformer"] = ["MaskFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_maskformer"] = [
        "MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MaskFormerForInstanceSegmentation",
        "MaskFormerModel",
        "MaskFormerPreTrainedModel",
    ]
    _import_structure["modeling_maskformer_swin"] = [
        "MaskFormerSwinBackbone",
        "MaskFormerSwinModel",
        "MaskFormerSwinPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 314
| 0
|
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
snake_case__ : List[str] = logging.get_logger(__name__)
@add_end_docstrings(lowerCamelCase__ )
class snake_case_( lowerCamelCase__ ):
def __init__( self : str , *UpperCamelCase_ : List[str] , **UpperCamelCase_ : Union[str, Any] ):
super().__init__(*snake_case__ , **snake_case__ )
self.check_model_type(snake_case__ )
def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : Dict=None , UpperCamelCase_ : int=None , UpperCamelCase_ : List[Any]=None , **UpperCamelCase_ : Tuple ):
lowerCAmelCase : Tuple = {}, {}
if padding is not None:
lowerCAmelCase : Optional[int] = padding
if truncation is not None:
lowerCAmelCase : str = truncation
if top_k is not None:
lowerCAmelCase : Dict = top_k
return preprocess_params, {}, postprocess_params
def __call__( self : Optional[int] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Dict = None , **UpperCamelCase_ : Tuple ):
if isinstance(snake_case__ , (Image.Image, str) ) and isinstance(snake_case__ , snake_case__ ):
lowerCAmelCase : Any = {'''image''': image, '''question''': question}
else:
lowerCAmelCase : List[Any] = image
lowerCAmelCase : List[Any] = super().__call__(snake_case__ , **snake_case__ )
return results
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Any=False , UpperCamelCase_ : Dict=False ):
lowerCAmelCase : List[str] = load_image(inputs['''image'''] )
lowerCAmelCase : Tuple = self.tokenizer(
inputs['''question'''] , return_tensors=self.framework , padding=snake_case__ , truncation=snake_case__ )
lowerCAmelCase : str = self.image_processor(images=snake_case__ , return_tensors=self.framework )
model_inputs.update(snake_case__ )
return model_inputs
def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : str ):
lowerCAmelCase : Dict = self.model(**snake_case__ )
return model_outputs
def lowerCamelCase__ ( self : int , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Dict=5 ):
if top_k > self.model.config.num_labels:
lowerCAmelCase : Dict = self.model.config.num_labels
if self.framework == "pt":
lowerCAmelCase : Dict = model_outputs.logits.sigmoid()[0]
lowerCAmelCase : int = probs.topk(snake_case__ )
else:
raise ValueError(F'''Unsupported framework: {self.framework}''' )
lowerCAmelCase : Optional[int] = scores.tolist()
lowerCAmelCase : Union[str, Any] = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(snake_case__ , snake_case__ )]
| 362
|
"""simple docstring"""
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class snake_case_:
def __init__( self : Dict , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : int=sys.maxsize ):
lowerCAmelCase : Tuple = '''bilinear'''
lowerCAmelCase : List[Any] = max_size
lowerCAmelCase : Optional[int] = short_edge_length
def __call__( self : Optional[int] , UpperCamelCase_ : Optional[int] ):
lowerCAmelCase : Tuple = []
for img in imgs:
lowerCAmelCase, lowerCAmelCase : List[str] = img.shape[:2]
# later: provide list and randomly choose index for resize
lowerCAmelCase : int = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
if size == 0:
return img
lowerCAmelCase : Optional[Any] = size * 1.0 / min(UpperCamelCase_ , UpperCamelCase_ )
if h < w:
lowerCAmelCase, lowerCAmelCase : List[str] = size, scale * w
else:
lowerCAmelCase, lowerCAmelCase : int = scale * h, size
if max(UpperCamelCase_ , UpperCamelCase_ ) > self.max_size:
lowerCAmelCase : Union[str, Any] = self.max_size * 1.0 / max(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : Tuple = newh * scale
lowerCAmelCase : str = neww * scale
lowerCAmelCase : Union[str, Any] = int(neww + 0.5 )
lowerCAmelCase : str = int(newh + 0.5 )
if img.dtype == np.uinta:
lowerCAmelCase : Tuple = Image.fromarray(UpperCamelCase_ )
lowerCAmelCase : Optional[int] = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
lowerCAmelCase : Union[str, Any] = np.asarray(UpperCamelCase_ )
else:
lowerCAmelCase : List[str] = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # hwc -> nchw
lowerCAmelCase : Optional[int] = nn.functional.interpolate(
UpperCamelCase_ , (newh, neww) , mode=self.interp_method , align_corners=UpperCamelCase_ ).squeeze(0 )
img_augs.append(UpperCamelCase_ )
return img_augs
class snake_case_:
def __init__( self : Tuple , UpperCamelCase_ : Any ):
lowerCAmelCase : Any = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
lowerCAmelCase : List[Any] = cfg.INPUT.FORMAT
lowerCAmelCase : Tuple = cfg.SIZE_DIVISIBILITY
lowerCAmelCase : int = cfg.PAD_VALUE
lowerCAmelCase : Union[str, Any] = cfg.INPUT.MAX_SIZE_TEST
lowerCAmelCase : Union[str, Any] = cfg.MODEL.DEVICE
lowerCAmelCase : Union[str, Any] = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
lowerCAmelCase : List[Any] = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
lowerCAmelCase : Optional[int] = lambda UpperCamelCase_ : (x - self.pixel_mean) / self.pixel_std
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : List[Any] ):
lowerCAmelCase : Dict = tuple(max(UpperCamelCase_ ) for s in zip(*[img.shape for img in images] ) )
lowerCAmelCase : Dict = [im.shape[-2:] for im in images]
lowerCAmelCase : Dict = [
nn.functional.pad(
UpperCamelCase_ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(UpperCamelCase_ , UpperCamelCase_ )
]
return torch.stack(UpperCamelCase_ ), torch.tensor(UpperCamelCase_ )
def __call__( self : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[int]=False ):
with torch.no_grad():
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
lowerCAmelCase : List[Any] = [images]
if single_image:
assert len(UpperCamelCase_ ) == 1
for i in range(len(UpperCamelCase_ ) ):
if isinstance(images[i] , torch.Tensor ):
images.insert(UpperCamelCase_ , images.pop(UpperCamelCase_ ).to(self.device ).float() )
elif not isinstance(images[i] , torch.Tensor ):
images.insert(
UpperCamelCase_ , torch.as_tensor(img_tensorize(images.pop(UpperCamelCase_ ) , input_format=self.input_format ) )
.to(self.device )
.float() , )
# resize smallest edge
lowerCAmelCase : Dict = torch.tensor([im.shape[:2] for im in images] )
lowerCAmelCase : str = self.aug(UpperCamelCase_ )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
lowerCAmelCase : int = [self.normalizer(UpperCamelCase_ ) for x in images]
# now pad them to do the following operations
lowerCAmelCase, lowerCAmelCase : Optional[Any] = self.pad(UpperCamelCase_ )
# size-divisibility padding is not supported in this preprocessor
if self.size_divisibility > 0:
raise NotImplementedError()
# compute per-image (scale_y, scale_x) between the raw and the padded sizes
lowerCAmelCase : Union[str, Any] = torch.true_divide(UpperCamelCase_ , UpperCamelCase_ )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def _snake_case ( _snake_case : str , _snake_case : List[Any] ):
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def _snake_case ( _snake_case : Any , _snake_case : Tuple[int, int] ):
assert torch.isfinite(_snake_case ).all(), "Box tensor contains infinite or NaN!"
lowerCAmelCase, lowerCAmelCase : Optional[int] = box_size
tensor[:, 0].clamp_(min=0 , max=_snake_case )
tensor[:, 1].clamp_(min=0 , max=_snake_case )
tensor[:, 2].clamp_(min=0 , max=_snake_case )
tensor[:, 3].clamp_(min=0 , max=_snake_case )
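# A small worked example of the two box helpers above, using plain tensors:
# boxes are (x0, y0, x1, y1) rows and scales are per-image (scale_y, scale_x).
import torch
boxes = torch.tensor([[10.0, 20.0, 500.0, 900.0]])
scales_yx = torch.tensor([[0.5, 0.25]])  # shrink y by 2x, x by 4x
boxes[:, 0::2] *= scales_yx[:, 1]  # x coordinates scaled by scale_x
boxes[:, 1::2] *= scales_yx[:, 0]  # y coordinates scaled by scale_y
# clip to a 300x300 image with the same clamp sequence as the second helper
height, width = 300, 300
boxes[:, 0].clamp_(min=0, max=width)
boxes[:, 1].clamp_(min=0, max=height)
boxes[:, 2].clamp_(min=0, max=width)
boxes[:, 3].clamp_(min=0, max=height)
# boxes -> tensor([[  2.5000,  10.0000, 125.0000, 300.0000]])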
| 314
| 0
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
snake_case__ : Union[str, Any] = logging.get_logger(__name__)
def _snake_case ( _snake_case : Optional[Any] ):
lowerCAmelCase : List[Any] = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
lowerCAmelCase : Optional[Any] = [144, 192, 240]
lowerCAmelCase : int = [16, 32, 64, 96, 128, 160, 640]
elif "mobilevit_xs" in mobilevit_name:
lowerCAmelCase : Tuple = [96, 120, 144]
lowerCAmelCase : List[Any] = [16, 32, 48, 64, 80, 96, 384]
elif "mobilevit_xxs" in mobilevit_name:
lowerCAmelCase : List[Any] = [64, 80, 96]
lowerCAmelCase : int = [16, 16, 24, 48, 64, 80, 320]
lowerCAmelCase : List[str] = 0.05
lowerCAmelCase : Optional[Any] = 2.0
if mobilevit_name.startswith('''deeplabv3_''' ):
lowerCAmelCase : List[Any] = 512
lowerCAmelCase : Optional[Any] = 16
lowerCAmelCase : Optional[int] = 21
lowerCAmelCase : int = "pascal-voc-id2label.json"
else:
lowerCAmelCase : Any = 1000
lowerCAmelCase : Any = "imagenet-1k-id2label.json"
lowerCAmelCase : Tuple = "huggingface/label-files"
lowerCAmelCase : Dict = json.load(open(hf_hub_download(_lowercase , _lowercase , repo_type='''dataset''' ) , '''r''' ) )
lowerCAmelCase : Optional[int] = {int(_lowercase ): v for k, v in idalabel.items()}
lowerCAmelCase : int = idalabel
lowerCAmelCase : str = {v: k for k, v in idalabel.items()}
return config
def _snake_case ( _snake_case : Optional[int] , _snake_case : int=False ):
for i in range(1 , 6 ):
if f'''layer_{i}.''' in name:
lowerCAmelCase : Dict = name.replace(f'''layer_{i}.''' , f'''encoder.layer.{i - 1}.''' )
if "conv_1." in name:
lowerCAmelCase : str = name.replace('''conv_1.''' , '''conv_stem.''' )
if ".block." in name:
lowerCAmelCase : int = name.replace('''.block.''' , '''.''' )
if "exp_1x1" in name:
lowerCAmelCase : List[str] = name.replace('''exp_1x1''' , '''expand_1x1''' )
if "red_1x1" in name:
lowerCAmelCase : Dict = name.replace('''red_1x1''' , '''reduce_1x1''' )
if ".local_rep.conv_3x3." in name:
lowerCAmelCase : int = name.replace('''.local_rep.conv_3x3.''' , '''.conv_kxk.''' )
if ".local_rep.conv_1x1." in name:
lowerCAmelCase : Any = name.replace('''.local_rep.conv_1x1.''' , '''.conv_1x1.''' )
if ".norm." in name:
lowerCAmelCase : Optional[int] = name.replace('''.norm.''' , '''.normalization.''' )
if ".conv." in name:
lowerCAmelCase : Dict = name.replace('''.conv.''' , '''.convolution.''' )
if ".conv_proj." in name:
lowerCAmelCase : Dict = name.replace('''.conv_proj.''' , '''.conv_projection.''' )
for i in range(0 , 2 ):
for j in range(0 , 4 ):
if f'''.{i}.{j}.''' in name:
lowerCAmelCase : Dict = name.replace(f'''.{i}.{j}.''' , f'''.{i}.layer.{j}.''' )
for i in range(2 , 6 ):
for j in range(0 , 4 ):
if f'''.{i}.{j}.''' in name:
lowerCAmelCase : List[str] = name.replace(f'''.{i}.{j}.''' , f'''.{i}.''' )
if "expand_1x1" in name:
lowerCAmelCase : Dict = name.replace('''expand_1x1''' , '''downsampling_layer.expand_1x1''' )
if "conv_3x3" in name:
lowerCAmelCase : str = name.replace('''conv_3x3''' , '''downsampling_layer.conv_3x3''' )
if "reduce_1x1" in name:
lowerCAmelCase : List[str] = name.replace('''reduce_1x1''' , '''downsampling_layer.reduce_1x1''' )
for i in range(2 , 5 ):
if f'''.global_rep.{i}.weight''' in name:
lowerCAmelCase : Any = name.replace(f'''.global_rep.{i}.weight''' , '''.layernorm.weight''' )
if f'''.global_rep.{i}.bias''' in name:
lowerCAmelCase : Union[str, Any] = name.replace(f'''.global_rep.{i}.bias''' , '''.layernorm.bias''' )
if ".global_rep." in name:
lowerCAmelCase : Union[str, Any] = name.replace('''.global_rep.''' , '''.transformer.''' )
if ".pre_norm_mha.0." in name:
lowerCAmelCase : Dict = name.replace('''.pre_norm_mha.0.''' , '''.layernorm_before.''' )
if ".pre_norm_mha.1.out_proj." in name:
lowerCAmelCase : Optional[int] = name.replace('''.pre_norm_mha.1.out_proj.''' , '''.attention.output.dense.''' )
if ".pre_norm_ffn.0." in name:
lowerCAmelCase : Union[str, Any] = name.replace('''.pre_norm_ffn.0.''' , '''.layernorm_after.''' )
if ".pre_norm_ffn.1." in name:
lowerCAmelCase : Any = name.replace('''.pre_norm_ffn.1.''' , '''.intermediate.dense.''' )
if ".pre_norm_ffn.4." in name:
lowerCAmelCase : str = name.replace('''.pre_norm_ffn.4.''' , '''.output.dense.''' )
if ".transformer." in name:
lowerCAmelCase : int = name.replace('''.transformer.''' , '''.transformer.layer.''' )
if ".aspp_layer." in name:
lowerCAmelCase : Optional[Any] = name.replace('''.aspp_layer.''' , '''.''' )
if ".aspp_pool." in name:
lowerCAmelCase : Dict = name.replace('''.aspp_pool.''' , '''.''' )
if "seg_head." in name:
lowerCAmelCase : str = name.replace('''seg_head.''' , '''segmentation_head.''' )
if "segmentation_head.classifier.classifier." in name:
lowerCAmelCase : int = name.replace('''segmentation_head.classifier.classifier.''' , '''segmentation_head.classifier.''' )
if "classifier.fc." in name:
lowerCAmelCase : Optional[Any] = name.replace('''classifier.fc.''' , '''classifier.''' )
elif (not base_model) and ("segmentation_head." not in name):
lowerCAmelCase : str = "mobilevit." + name
return name
def _snake_case ( _snake_case : str , _snake_case : Optional[Any] , _snake_case : Any=False ):
if base_model:
lowerCAmelCase : Tuple = ""
else:
lowerCAmelCase : Any = "mobilevit."
for key in orig_state_dict.copy().keys():
lowerCAmelCase : Any = orig_state_dict.pop(_lowercase )
if key[:8] == "encoder.":
lowerCAmelCase : str = key[8:]
if "qkv" in key:
lowerCAmelCase : Union[str, Any] = key.split('''.''' )
lowerCAmelCase : Dict = int(key_split[0][6:] ) - 1
lowerCAmelCase : Union[str, Any] = int(key_split[3] )
lowerCAmelCase : Union[str, Any] = model.get_submodule(f'''{model_prefix}encoder.layer.{layer_num}''' )
lowerCAmelCase : int = layer.transformer.layer[transformer_num].attention.attention.all_head_size
lowerCAmelCase : List[Any] = (
f'''{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.'''
)
if "weight" in key:
lowerCAmelCase : Tuple = val[:dim, :]
lowerCAmelCase : int = val[dim : dim * 2, :]
lowerCAmelCase : List[Any] = val[-dim:, :]
else:
lowerCAmelCase : str = val[:dim]
lowerCAmelCase : Optional[Any] = val[dim : dim * 2]
lowerCAmelCase : Union[str, Any] = val[-dim:]
else:
lowerCAmelCase : str = val
return orig_state_dict
def _snake_case ( ):
lowerCAmelCase : str = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCAmelCase : Dict = Image.open(requests.get(_lowercase , stream=_lowercase ).raw )
return im
@torch.no_grad()
def _snake_case ( _snake_case : List[str] , _snake_case : int , _snake_case : Tuple , _snake_case : Tuple=False ):
lowerCAmelCase : Union[str, Any] = get_mobilevit_config(_lowercase )
# load original state_dict
lowerCAmelCase : Tuple = torch.load(_lowercase , map_location='''cpu''' )
# load 🤗 model
if mobilevit_name.startswith('''deeplabv3_''' ):
lowerCAmelCase : Optional[Any] = MobileViTForSemanticSegmentation(_lowercase ).eval()
else:
lowerCAmelCase : List[Any] = MobileViTForImageClassification(_lowercase ).eval()
lowerCAmelCase : Optional[int] = convert_state_dict(_lowercase , _lowercase )
model.load_state_dict(_lowercase )
# Check outputs on an image, prepared by MobileViTImageProcessor
lowerCAmelCase : Tuple = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
lowerCAmelCase : Union[str, Any] = image_processor(images=prepare_img() , return_tensors='''pt''' )
lowerCAmelCase : Optional[Any] = model(**_lowercase )
lowerCAmelCase : List[str] = outputs.logits
if mobilevit_name.startswith('''deeplabv3_''' ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
lowerCAmelCase : int = torch.tensor(
[
[[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
[[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
[[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
lowerCAmelCase : List[str] = torch.tensor(
[
[[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
[[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
[[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
lowerCAmelCase : int = torch.tensor(
[
[[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
[[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
[[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
] )
else:
raise ValueError(f'''Unknown mobilevit_name: {mobilevit_name}''' )
assert torch.allclose(logits[0, :3, :3, :3] , _lowercase , atol=1E-4 )
else:
assert logits.shape == (1, 1000)
if mobilevit_name == "mobilevit_s":
lowerCAmelCase : int = torch.tensor([-0.9866, 0.2392, -1.1241] )
elif mobilevit_name == "mobilevit_xs":
lowerCAmelCase : int = torch.tensor([-2.4761, -0.9399, -1.9587] )
elif mobilevit_name == "mobilevit_xxs":
lowerCAmelCase : str = torch.tensor([-1.9364, -1.2327, -0.4653] )
else:
raise ValueError(f'''Unknown mobilevit_name: {mobilevit_name}''' )
assert torch.allclose(logits[0, :3] , _lowercase , atol=1E-4 )
Path(_lowercase ).mkdir(exist_ok=_lowercase )
print(f'''Saving model {mobilevit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowercase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_lowercase )
if push_to_hub:
lowerCAmelCase : Dict = {
"mobilevit_s": "mobilevit-small",
"mobilevit_xs": "mobilevit-x-small",
"mobilevit_xxs": "mobilevit-xx-small",
"deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
"deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
"deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
}
print('''Pushing to the hub...''' )
lowerCAmelCase : Any = model_mapping[mobilevit_name]
image_processor.push_to_hub(_lowercase , organization='''apple''' )
model.push_to_hub(_lowercase , organization='''apple''' )
if __name__ == "__main__":
snake_case__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
snake_case__ : Optional[Any] = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
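# A minimal sketch of the fused-QKV split performed inside the state-dict
# conversion above: one (3 * dim, dim) projection weight is cut into separate
# query/key/value weights. `dim` is a toy size chosen for illustration.
import torch
dim = 4
qkv_weight = torch.randn(3 * dim, dim)  # fused projection, rows = [q; k; v]
q = qkv_weight[:dim, :]  # first `dim` rows -> query
k = qkv_weight[dim : dim * 2, :]  # middle rows -> key
v = qkv_weight[-dim:, :]  # last rows -> value
assert torch.equal(torch.cat([q, k, v]), qkv_weight)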
| 363
|
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _snake_case ( _snake_case : Dict ):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like all of the other languages.
if (
(cp >= 0X4e00 and cp <= 0X9fff)
or (cp >= 0X3400 and cp <= 0X4dbf) #
or (cp >= 0X2_0000 and cp <= 0X2_a6df) #
or (cp >= 0X2_a700 and cp <= 0X2_b73f) #
or (cp >= 0X2_b740 and cp <= 0X2_b81f) #
or (cp >= 0X2_b820 and cp <= 0X2_ceaf) #
or (cp >= 0Xf900 and cp <= 0Xfaff)
or (cp >= 0X2_f800 and cp <= 0X2_fa1f) #
): #
return True
return False
def _snake_case ( _snake_case : str ):
# returns 1 only if every character in the word is a Chinese character (e.g. '身高' or '神'; '180' returns 0)
for char in word:
lowerCAmelCase : str = ord(_snake_case )
if not _is_chinese_char(_snake_case ):
return 0
return 1
def _snake_case ( _snake_case : List[str] ):
lowerCAmelCase : List[Any] = set()
for token in tokens:
lowerCAmelCase : Union[str, Any] = len(_snake_case ) > 1 and is_chinese(_snake_case )
if chinese_word:
word_set.add(_snake_case )
lowerCAmelCase : List[str] = list(_snake_case )
return word_list
def _snake_case ( _snake_case : List[str] , _snake_case : set() ):
if not chinese_word_set:
return bert_tokens
lowerCAmelCase : List[Any] = max([len(_snake_case ) for w in chinese_word_set] )
lowerCAmelCase : Optional[Any] = bert_tokens
lowerCAmelCase, lowerCAmelCase : Any = 0, len(_snake_case )
while start < end:
lowerCAmelCase : str = True
if is_chinese(bert_word[start] ):
lowerCAmelCase : List[Any] = min(end - start , _snake_case )
for i in range(_snake_case , 1 , -1 ):
lowerCAmelCase : str = ''''''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
lowerCAmelCase : Optional[Any] = '''##''' + bert_word[j]
lowerCAmelCase : Union[str, Any] = start + i
lowerCAmelCase : Optional[Any] = False
break
if single_word:
start += 1
return bert_word
def _snake_case ( _snake_case : List[str] , _snake_case : LTP , _snake_case : BertTokenizer ):
lowerCAmelCase : Optional[int] = []
for i in range(0 , len(_snake_case ) , 100 ):
lowerCAmelCase : Optional[int] = ltp_tokenizer.seg(lines[i : i + 100] )[0]
lowerCAmelCase : Union[str, Any] = [get_chinese_word(_snake_case ) for r in res]
ltp_res.extend(_snake_case )
assert len(_snake_case ) == len(_snake_case )
lowerCAmelCase : int = []
for i in range(0 , len(_snake_case ) , 100 ):
lowerCAmelCase : Optional[Any] = bert_tokenizer(lines[i : i + 100] , add_special_tokens=_snake_case , truncation=_snake_case , max_length=512 )
bert_res.extend(res['''input_ids'''] )
assert len(_snake_case ) == len(_snake_case )
lowerCAmelCase : Union[str, Any] = []
for input_ids, chinese_word in zip(_snake_case , _snake_case ):
lowerCAmelCase : Optional[int] = []
for id in input_ids:
lowerCAmelCase : Union[str, Any] = bert_tokenizer._convert_id_to_token(_snake_case )
input_tokens.append(_snake_case )
lowerCAmelCase : Any = add_sub_symbol(_snake_case , _snake_case )
lowerCAmelCase : Union[str, Any] = []
# We only save the positions of Chinese subwords starting with "##", which means they are part of a whole word.
for i, token in enumerate(_snake_case ):
if token[:2] == "##":
lowerCAmelCase : Any = token[2:]
# save chinese tokens' pos
if len(_snake_case ) == 1 and _is_chinese_char(ord(_snake_case ) ):
ref_id.append(_snake_case )
ref_ids.append(_snake_case )
assert len(_snake_case ) == len(_snake_case )
return ref_ids
def _snake_case ( _snake_case : Dict ):
# For Chinese (Ro)BERT, the best results come from RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
# To fine-tune these models, we have to use the same tokenizer: LTP (https://github.com/HIT-SCIR/ltp)
with open(args.file_name , '''r''' , encoding='''utf-8''' ) as f:
lowerCAmelCase : List[str] = f.readlines()
lowerCAmelCase : Union[str, Any] = [line.strip() for line in data if len(_snake_case ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
lowerCAmelCase : List[str] = LTP(args.ltp ) # faster in GPU device
lowerCAmelCase : Any = BertTokenizer.from_pretrained(args.bert )
lowerCAmelCase : int = prepare_ref(_snake_case , _snake_case , _snake_case )
with open(args.save_path , '''w''' , encoding='''utf-8''' ) as f:
lowerCAmelCase : List[Any] = [json.dumps(_snake_case ) + '''\n''' for ref in ref_ids]
f.writelines(_snake_case )
if __name__ == "__main__":
snake_case__ : Optional[int] = argparse.ArgumentParser(description='''prepare_chinese_ref''')
parser.add_argument(
'''--file_name''',
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''', type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path'''
)
parser.add_argument('''--bert''', type=str, default='''./resources/robert''', help='''resources for Bert tokenizer''')
parser.add_argument('''--save_path''', type=str, default='''./resources/ref.txt''', help='''path to save res''')
snake_case__ : int = parser.parse_args()
main(args)
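# A worked example of the "##" marking logic above, rewritten as a sketch with
# plain names (the dump's helper names are obfuscated) and without the
# is-Chinese gate, which does not change the result for this input.
def mark_subwords(bert_word, chinese_word_set):
    start, end = 0, len(bert_word)
    max_word_len = max((len(w) for w in chinese_word_set), default=0)
    while start < end:
        single_word = True
        for i in range(min(end - start, max_word_len), 1, -1):
            whole_word = "".join(bert_word[start : start + i])
            if whole_word in chinese_word_set:
                for j in range(start + 1, start + i):
                    bert_word[j] = "##" + bert_word[j]
                start = start + i
                single_word = False
                break
        if single_word:
            start += 1
    return bert_word
print(mark_subwords(["身", "高", "1", "8", "0"], {"身高"}))  # ['身', '##高', '1', '8', '0']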
| 314
| 0
|
"""simple docstring"""
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class snake_case_( unittest.TestCase ):
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase : str = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(a__ ) )
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : Dict = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(a__ ) )
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase : Optional[int] = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(a__ ) )
def lowerCamelCase__ ( self : Any ):
lowerCAmelCase : List[str] = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(a__ ) )
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase : Optional[Any] = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
# Removed: 'text_encoder/model.safetensors',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertFalse(is_safetensors_compatible(a__ ) )
def lowerCamelCase__ ( self : Optional[int] ):
lowerCAmelCase : List[Any] = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
lowerCAmelCase : List[str] = '''fp16'''
self.assertTrue(is_safetensors_compatible(a__ , variant=a__ ) )
def lowerCamelCase__ ( self : Optional[int] ):
lowerCAmelCase : Optional[int] = [
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
lowerCAmelCase : Optional[int] = '''fp16'''
self.assertTrue(is_safetensors_compatible(a__ , variant=a__ ) )
def lowerCamelCase__ ( self : Optional[int] ):
# pass variant but use the non-variant filenames
lowerCAmelCase : int = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
lowerCAmelCase : List[str] = '''fp16'''
self.assertTrue(is_safetensors_compatible(a__ , variant=a__ ) )
def lowerCamelCase__ ( self : Optional[Any] ):
lowerCAmelCase : Union[str, Any] = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
lowerCAmelCase : Any = '''fp16'''
self.assertFalse(is_safetensors_compatible(a__ , variant=a__ ) )
def lowerCamelCase__ ( self : Optional[int] ):
lowerCAmelCase : Union[str, Any] = [
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
]
lowerCAmelCase : Optional[int] = '''fp16'''
self.assertTrue(is_safetensors_compatible(a__ , variant=a__ ) )
def lowerCamelCase__ ( self : Optional[int] ):
# pass variant but use the non-variant filenames
lowerCAmelCase : Optional[Any] = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
lowerCAmelCase : Any = '''fp16'''
self.assertTrue(is_safetensors_compatible(a__ , variant=a__ ) )
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase : Optional[Any] = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
# 'text_encoder/model.fp16.safetensors',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
lowerCAmelCase : Dict = '''fp16'''
self.assertFalse(is_safetensors_compatible(a__ , variant=a__ ) )
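# A sketch of the rule these tests encode (a reimplementation for
# illustration, not the library's actual helper): every PyTorch ".bin" weight
# must have a sibling ".safetensors" file, carrying the variant infix (e.g.
# ".fp16.") when a variant is requested.
def sketch_is_safetensors_compatible(filenames, variant=None):
    infix = f".{variant}" if variant is not None else ""
    available = set(filenames)
    for f in filenames:
        folder, _, name = f.rpartition("/")
        if name == f"pytorch_model{infix}.bin":
            expected = f"{folder}/model{infix}.safetensors"
        elif name == f"diffusion_pytorch_model{infix}.bin":
            expected = f"{folder}/diffusion_pytorch_model{infix}.safetensors"
        else:
            continue
        if expected not in available:
            return False
    return True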
| 364
|
"""simple docstring"""
import numpy as np
from PIL import Image
def _snake_case ( _snake_case : np.ndarray , _snake_case : int , _snake_case : int ):
lowerCAmelCase : Dict = np.array(_snake_case )
if arr.shape[0] != arr.shape[1]:
raise ValueError('''The input array is not a square matrix''' )
lowerCAmelCase : int = 0
lowerCAmelCase : Dict = 0
lowerCAmelCase : str = 0
lowerCAmelCase : Union[str, Any] = 0
# compute the shape of the output matrix
lowerCAmelCase : Tuple = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape maxpool_shape
lowerCAmelCase : Dict = np.zeros((maxpool_shape, maxpool_shape) )
while i < arr.shape[0]:
if i + size > arr.shape[0]:
# if the end of the matrix is reached, break
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the maximum of the pooling matrix
lowerCAmelCase : List[Any] = np.max(arr[i : i + size, j : j + size] )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
lowerCAmelCase : int = 0
lowerCAmelCase : Tuple = 0
return updated_arr
def _snake_case ( _snake_case : np.ndarray , _snake_case : int , _snake_case : int ):
lowerCAmelCase : Union[str, Any] = np.array(_snake_case )
if arr.shape[0] != arr.shape[1]:
raise ValueError('''The input array is not a square matrix''' )
lowerCAmelCase : Optional[Any] = 0
lowerCAmelCase : Any = 0
lowerCAmelCase : int = 0
lowerCAmelCase : int = 0
# compute the shape of the output matrix
lowerCAmelCase : str = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape avgpool_shape
lowerCAmelCase : Dict = np.zeros((avgpool_shape, avgpool_shape) )
while i < arr.shape[0]:
# if the end of the matrix is reached, break
if i + size > arr.shape[0]:
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the average of the pooling matrix
lowerCAmelCase : Optional[int] = int(np.average(arr[i : i + size, j : j + size] ) )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
lowerCAmelCase : str = 0
lowerCAmelCase : List[Any] = 0
return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='''avgpooling''', verbose=True)
# Loading the image
snake_case__ : Optional[Any] = Image.open('''path_to_image''')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
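# A worked example of maxpooling on a 4x4 matrix with size=2 and stride=2;
# the output shape is (4 - 2) // 2 + 1 = 2 in each dimension. The name
# `maxpooling` follows the calls above (the def itself is obfuscated in this dump).
example = np.arange(1, 17).reshape(4, 4)
# [[ 1  2  3  4]
#  [ 5  6  7  8]        [[ 6.  8.]
#  [ 9 10 11 12]   ->    [14. 16.]]
#  [13 14 15 16]]
print(maxpooling(example, size=2, stride=2))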
| 314
| 0
|
"""simple docstring"""
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class snake_case_:
@staticmethod
def lowerCamelCase__ ( *UpperCamelCase_ : str , **UpperCamelCase_ : Union[str, Any] ):
pass
def _snake_case ( _snake_case : Image ):
lowerCAmelCase : Any = hashlib.mda(image.tobytes() )
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class snake_case_( unittest.TestCase ):
__UpperCamelCase = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : str , UpperCamelCase_ : Dict , UpperCamelCase_ : Any ):
lowerCAmelCase : Optional[Any] = DepthEstimationPipeline(model=lowerCAmelCase_ , image_processor=lowerCAmelCase_ )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : List[Any] ):
lowerCAmelCase : Any = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
self.assertEqual({'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )} , lowerCAmelCase_ )
import datasets
lowerCAmelCase : Any = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' )
lowerCAmelCase : Optional[Any] = depth_estimator(
[
Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
# RGBA
dataset[0]['''file'''],
# LA
dataset[1]['''file'''],
# L
dataset[2]['''file'''],
] )
self.assertEqual(
[
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
] , lowerCAmelCase_ , )
@require_tf
@unittest.skip('''Depth estimation is not implemented in TF''' )
def lowerCamelCase__ ( self : Any ):
pass
@slow
@require_torch
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase : Union[str, Any] = '''Intel/dpt-large'''
lowerCAmelCase : Dict = pipeline('''depth-estimation''' , model=lowerCAmelCase_ )
lowerCAmelCase : Optional[Any] = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
lowerCAmelCase : Union[str, Any] = hashimage(outputs['''depth'''] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item() ) , 2.662 )
@require_torch
def lowerCamelCase__ ( self : int ):
# It is highly irregular to have no small tests.
self.skipTest('''There is not hf-internal-testing tiny model for either GLPN nor DPT''' )
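# A brief usage sketch matching the slow test above; "Intel/dpt-large" is the
# same checkpoint the test loads.
from transformers import pipeline
depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
out = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
out["depth"].save("depth.png")  # PIL image of the rescaled depth map
print(out["predicted_depth"].shape)  # raw torch.Tensor of predicted depths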
| 365
|
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class snake_case_( a__ ):
def __init__( self : Dict , UpperCamelCase_ : Any , UpperCamelCase_ : List[str] ):
super().__init__()
# make sure scheduler can always be converted to DDIM
lowerCAmelCase : str = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=UpperCamelCase_ , scheduler=UpperCamelCase_ )
@torch.no_grad()
def __call__( self : str , UpperCamelCase_ : int = 1 , UpperCamelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_ : float = 0.0 , UpperCamelCase_ : int = 5_0 , UpperCamelCase_ : Optional[bool] = None , UpperCamelCase_ : Optional[str] = "pil" , UpperCamelCase_ : bool = True , ):
# Sample gaussian noise to begin loop
if isinstance(self.unet.config.sample_size , UpperCamelCase_ ):
lowerCAmelCase : Dict = (
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size,
self.unet.config.sample_size,
)
else:
lowerCAmelCase : str = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and len(UpperCamelCase_ ) != batch_size:
raise ValueError(
F'''You have passed a list of generators of length {len(UpperCamelCase_ )}, but requested an effective batch'''
F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
lowerCAmelCase : int = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(UpperCamelCase_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
lowerCAmelCase : Optional[Any] = self.unet(UpperCamelCase_ , UpperCamelCase_ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
lowerCAmelCase : Dict = self.scheduler.step(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , eta=UpperCamelCase_ , use_clipped_model_output=UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample
lowerCAmelCase : Tuple = (image / 2 + 0.5).clamp(0 , 1 )
lowerCAmelCase : str = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowerCAmelCase : Any = self.numpy_to_pil(UpperCamelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCamelCase_ )
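# A short usage sketch for the pipeline above; the checkpoint id is the
# commonly used DDPM CIFAR-10 example and is an assumption here.
from diffusers import DDIMPipeline
pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
image = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images[0]
image.save("ddim_sample.png")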
| 314
| 0
|
"""simple docstring"""
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class snake_case_( _lowerCAmelCase ):
__UpperCamelCase = 42
__UpperCamelCase = jnp.floataa
__UpperCamelCase = True
def lowerCamelCase__ ( self : Optional[Any] ):
super().setup()
lowerCAmelCase : Optional[Any] = nn.Dense(5 , dtype=self.dtype )
def __call__( self : Union[str, Any] , *UpperCamelCase_ : Dict , **UpperCamelCase_ : Optional[int] ):
lowerCAmelCase : str = super().__call__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCAmelCase : Optional[int] = self.cls(outputs[2] )
return outputs[:2] + (cls_out,)
class snake_case_( _lowerCAmelCase ):
__UpperCamelCase = FlaxBigBirdForNaturalQuestionsModule
def _snake_case ( _snake_case : Dict , _snake_case : Union[str, Any] , _snake_case : Optional[int] , _snake_case : Any , _snake_case : List[str] , _snake_case : Optional[int] ):
def cross_entropy(_snake_case : Dict , _snake_case : str , _snake_case : Union[str, Any]=None ):
lowerCAmelCase : List[Any] = logits.shape[-1]
lowerCAmelCase : Tuple = (labels[..., None] == jnp.arange(lowerCAmelCase__ )[None]).astype('''f4''' )
lowerCAmelCase : int = jax.nn.log_softmax(lowerCAmelCase__ , axis=-1 )
lowerCAmelCase : Tuple = -jnp.sum(labels * logits , axis=-1 )
if reduction is not None:
lowerCAmelCase : List[Any] = reduction(lowerCAmelCase__ )
return loss
lowerCAmelCase : Dict = partial(lowerCAmelCase__ , reduction=jnp.mean )
lowerCAmelCase : Any = cross_entropy(lowerCAmelCase__ , lowerCAmelCase__ )
lowerCAmelCase : str = cross_entropy(lowerCAmelCase__ , lowerCAmelCase__ )
lowerCAmelCase : Optional[Any] = cross_entropy(lowerCAmelCase__ , lowerCAmelCase__ )
return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class snake_case_:
__UpperCamelCase = '''google/bigbird-roberta-base'''
__UpperCamelCase = 3_000
__UpperCamelCase = 10_500
__UpperCamelCase = 128
__UpperCamelCase = 3
__UpperCamelCase = 1
__UpperCamelCase = 5
# tx_args
__UpperCamelCase = 3e-5
__UpperCamelCase = 0.0
__UpperCamelCase = 20_000
__UpperCamelCase = 0.00_95
__UpperCamelCase = '''bigbird-roberta-natural-questions'''
__UpperCamelCase = '''training-expt'''
__UpperCamelCase = '''data/nq-training.jsonl'''
__UpperCamelCase = '''data/nq-validation.jsonl'''
def lowerCamelCase__ ( self : Tuple ):
os.makedirs(self.base_dir , exist_ok=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase : Optional[Any] = os.path.join(self.base_dir , self.save_dir )
lowerCAmelCase : int = self.batch_size_per_device * jax.device_count()
@dataclass
class snake_case_:
__UpperCamelCase = 42
__UpperCamelCase = 4_096 # no dynamic padding on TPUs
def __call__( self : List[Any] , UpperCamelCase_ : List[str] ):
lowerCAmelCase : Any = self.collate_fn(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase : str = jax.tree_util.tree_map(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return batch
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : List[Any] ):
lowerCAmelCase : Union[str, Any] = self.fetch_inputs(features['''input_ids'''] )
lowerCAmelCase : Tuple = {
"""input_ids""": jnp.array(SCREAMING_SNAKE_CASE_ , dtype=jnp.intaa ),
"""attention_mask""": jnp.array(SCREAMING_SNAKE_CASE_ , dtype=jnp.intaa ),
"""start_labels""": jnp.array(features['''start_token'''] , dtype=jnp.intaa ),
"""end_labels""": jnp.array(features['''end_token'''] , dtype=jnp.intaa ),
"""pooled_labels""": jnp.array(features['''category'''] , dtype=jnp.intaa ),
}
return batch
def lowerCamelCase__ ( self : str , UpperCamelCase_ : List[Any] ):
lowerCAmelCase : Any = [self._fetch_inputs(SCREAMING_SNAKE_CASE_ ) for ids in input_ids]
return zip(*SCREAMING_SNAKE_CASE_ )
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : str ):
lowerCAmelCase : Union[str, Any] = [1 for _ in range(len(SCREAMING_SNAKE_CASE_ ) )]
while len(SCREAMING_SNAKE_CASE_ ) < self.max_length:
input_ids.append(self.pad_id )
attention_mask.append(0 )
return input_ids, attention_mask
def _snake_case ( _snake_case : Dict , _snake_case : List[str] , _snake_case : Tuple=None ):
if seed is not None:
lowerCAmelCase : int = dataset.shuffle(seed=lowerCAmelCase__ )
for i in range(len(lowerCAmelCase__ ) // batch_size ):
lowerCAmelCase : Optional[int] = dataset[i * batch_size : (i + 1) * batch_size]
yield dict(lowerCAmelCase__ )
@partial(jax.pmap , axis_name='''batch''' )
def _snake_case ( _snake_case : List[str] , _snake_case : int , **_snake_case : List[Any] ):
def loss_fn(_snake_case : Tuple ):
lowerCAmelCase : List[str] = model_inputs.pop('''start_labels''' )
lowerCAmelCase : str = model_inputs.pop('''end_labels''' )
lowerCAmelCase : List[str] = model_inputs.pop('''pooled_labels''' )
lowerCAmelCase : Optional[int] = state.apply_fn(**lowerCAmelCase__ , params=lowerCAmelCase__ , dropout_rng=lowerCAmelCase__ , train=lowerCAmelCase__ )
lowerCAmelCase : Any = outputs
return state.loss_fn(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , )
lowerCAmelCase : Optional[int] = jax.random.split(lowerCAmelCase__ )
lowerCAmelCase : Tuple = jax.value_and_grad(lowerCAmelCase__ )
lowerCAmelCase : int = grad_fn(state.params )
lowerCAmelCase : Optional[Any] = jax.lax.pmean({'''loss''': loss} , axis_name='''batch''' )
lowerCAmelCase : List[Any] = jax.lax.pmean(lowerCAmelCase__ , '''batch''' )
lowerCAmelCase : int = state.apply_gradients(grads=lowerCAmelCase__ )
return state, metrics, new_drp_rng
@partial(jax.pmap , axis_name='''batch''' )
def _snake_case ( _snake_case : Dict , **_snake_case : Dict ):
lowerCAmelCase : int = model_inputs.pop('''start_labels''' )
lowerCAmelCase : List[str] = model_inputs.pop('''end_labels''' )
lowerCAmelCase : Dict = model_inputs.pop('''pooled_labels''' )
lowerCAmelCase : List[Any] = state.apply_fn(**lowerCAmelCase__ , params=state.params , train=lowerCAmelCase__ )
lowerCAmelCase : str = outputs
lowerCAmelCase : Tuple = state.loss_fn(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
lowerCAmelCase : Optional[int] = jax.lax.pmean({'''loss''': loss} , axis_name='''batch''' )
return metrics
class snake_case_( train_state.TrainState ):
__UpperCamelCase = struct.field(pytree_node=_lowerCAmelCase )
@dataclass
class snake_case_:
__UpperCamelCase = 42
__UpperCamelCase = 42
__UpperCamelCase = 42
__UpperCamelCase = 42
__UpperCamelCase = 42
__UpperCamelCase = 42
__UpperCamelCase = None
def lowerCamelCase__ ( self : str , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : int=None ):
lowerCAmelCase : Dict = model.params
lowerCAmelCase : Union[str, Any] = TrainState.create(
apply_fn=model.__call__ , params=SCREAMING_SNAKE_CASE_ , tx=SCREAMING_SNAKE_CASE_ , loss_fn=SCREAMING_SNAKE_CASE_ , )
if ckpt_dir is not None:
lowerCAmelCase : List[Any] = restore_checkpoint(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase : List[str] = {
"""lr""": args.lr,
"""init_lr""": args.init_lr,
"""warmup_steps""": args.warmup_steps,
"""num_train_steps""": num_train_steps,
"""weight_decay""": args.weight_decay,
}
lowerCAmelCase : Dict = build_tx(**SCREAMING_SNAKE_CASE_ )
lowerCAmelCase : List[str] = train_state.TrainState(
step=SCREAMING_SNAKE_CASE_ , apply_fn=model.__call__ , params=SCREAMING_SNAKE_CASE_ , tx=SCREAMING_SNAKE_CASE_ , opt_state=SCREAMING_SNAKE_CASE_ , )
lowerCAmelCase : int = args
lowerCAmelCase : str = data_collator
lowerCAmelCase : Dict = lr
lowerCAmelCase : Any = params
lowerCAmelCase : str = jax_utils.replicate(SCREAMING_SNAKE_CASE_ )
return state
def lowerCamelCase__ ( self : str , UpperCamelCase_ : Tuple , UpperCamelCase_ : str , UpperCamelCase_ : List[str] ):
lowerCAmelCase : Optional[int] = self.args
lowerCAmelCase : Union[str, Any] = len(SCREAMING_SNAKE_CASE_ ) // args.batch_size
lowerCAmelCase : Dict = jax.random.PRNGKey(0 )
lowerCAmelCase : int = jax.random.split(SCREAMING_SNAKE_CASE_ , jax.device_count() )
for epoch in range(args.max_epochs ):
lowerCAmelCase : List[str] = jnp.array(0 , dtype=jnp.floataa )
lowerCAmelCase : Optional[Any] = get_batched_dataset(SCREAMING_SNAKE_CASE_ , args.batch_size , seed=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase : Optional[Any] = 0
for batch in tqdm(SCREAMING_SNAKE_CASE_ , total=SCREAMING_SNAKE_CASE_ , desc=F'''Running EPOCH-{epoch}''' ):
lowerCAmelCase : Dict = self.data_collator(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase : Any = self.train_step_fn(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
running_loss += jax_utils.unreplicate(metrics['''loss'''] )
i += 1
if i % args.logging_steps == 0:
lowerCAmelCase : int = jax_utils.unreplicate(state.step )
lowerCAmelCase : Optional[Any] = running_loss.item() / i
lowerCAmelCase : Optional[int] = self.scheduler_fn(state_step - 1 )
lowerCAmelCase : Dict = self.evaluate(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase : List[Any] = {
"""step""": state_step.item(),
"""eval_loss""": eval_loss.item(),
"""tr_loss""": tr_loss,
"""lr""": lr.item(),
}
tqdm.write(str(SCREAMING_SNAKE_CASE_ ) )
self.logger.log(SCREAMING_SNAKE_CASE_ , commit=SCREAMING_SNAKE_CASE_ )
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + F'''-e{epoch}-s{i}''' , state=SCREAMING_SNAKE_CASE_ )
def lowerCamelCase__ ( self : Tuple , UpperCamelCase_ : Dict , UpperCamelCase_ : Union[str, Any] ):
lowerCAmelCase : int = get_batched_dataset(SCREAMING_SNAKE_CASE_ , self.args.batch_size )
lowerCAmelCase : List[Any] = len(SCREAMING_SNAKE_CASE_ ) // self.args.batch_size
lowerCAmelCase : Union[str, Any] = jnp.array(0 , dtype=jnp.floataa )
lowerCAmelCase : Optional[int] = 0
for batch in tqdm(SCREAMING_SNAKE_CASE_ , total=SCREAMING_SNAKE_CASE_ , desc='''Evaluating ... ''' ):
lowerCAmelCase : List[str] = self.data_collator(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase : Union[str, Any] = self.val_step_fn(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
running_loss += jax_utils.unreplicate(metrics['''loss'''] )
i += 1
return running_loss / i
def lowerCamelCase__ ( self : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[Any] ):
lowerCAmelCase : Optional[Any] = jax_utils.unreplicate(SCREAMING_SNAKE_CASE_ )
print(F'''SAVING CHECKPOINT IN {save_dir}''' , end=''' ... ''' )
self.model_save_fn(SCREAMING_SNAKE_CASE_ , params=state.params )
with open(os.path.join(SCREAMING_SNAKE_CASE_ , '''opt_state.msgpack''' ) , '''wb''' ) as f:
f.write(to_bytes(state.opt_state ) )
joblib.dump(self.args , os.path.join(SCREAMING_SNAKE_CASE_ , '''args.joblib''' ) )
joblib.dump(self.data_collator , os.path.join(SCREAMING_SNAKE_CASE_ , '''data_collator.joblib''' ) )
with open(os.path.join(SCREAMING_SNAKE_CASE_ , '''training_state.json''' ) , '''w''' ) as f:
json.dump({'''step''': state.step.item()} , SCREAMING_SNAKE_CASE_ )
print('''DONE''' )
def _snake_case ( _snake_case : Union[str, Any] , _snake_case : List[Any] ):
print(f'''RESTORING CHECKPOINT FROM {save_dir}''' , end=''' ... ''' )
with open(os.path.join(lowerCAmelCase__ , '''flax_model.msgpack''' ) , '''rb''' ) as f:
lowerCAmelCase : Union[str, Any] = from_bytes(state.params , f.read() )
with open(os.path.join(lowerCAmelCase__ , '''opt_state.msgpack''' ) , '''rb''' ) as f:
lowerCAmelCase : Optional[Any] = from_bytes(state.opt_state , f.read() )
lowerCAmelCase : Union[str, Any] = joblib.load(os.path.join(lowerCAmelCase__ , '''args.joblib''' ) )
lowerCAmelCase : int = joblib.load(os.path.join(lowerCAmelCase__ , '''data_collator.joblib''' ) )
with open(os.path.join(lowerCAmelCase__ , '''training_state.json''' ) , '''r''' ) as f:
lowerCAmelCase : Optional[Any] = json.load(lowerCAmelCase__ )
lowerCAmelCase : Optional[Any] = training_state["""step"""]
print('''DONE''' )
return params, opt_state, step, args, data_collator
def _snake_case ( _snake_case : List[str] , _snake_case : List[str] , _snake_case : Union[str, Any] , _snake_case : Any ):
lowerCAmelCase : Optional[int] = num_train_steps - warmup_steps
lowerCAmelCase : Dict = optax.linear_schedule(init_value=lowerCAmelCase__ , end_value=lowerCAmelCase__ , transition_steps=lowerCAmelCase__ )
lowerCAmelCase : Dict = optax.linear_schedule(init_value=lowerCAmelCase__ , end_value=1E-7 , transition_steps=lowerCAmelCase__ )
lowerCAmelCase : Dict = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] )
return lr
def _snake_case ( _snake_case : Optional[int] , _snake_case : Any , _snake_case : List[str] , _snake_case : Any , _snake_case : Union[str, Any] ):
def weight_decay_mask(_snake_case : int ):
lowerCAmelCase : Union[str, Any] = traverse_util.flatten_dict(lowerCAmelCase__ )
lowerCAmelCase : int = {k: (k[-1] != """bias""" and k[-2:] != ("""LayerNorm""", """scale""")) for k in params}
return traverse_util.unflatten_dict(lowerCAmelCase__ )
lowerCAmelCase : List[Any] = scheduler_fn(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
lowerCAmelCase : Dict = optax.adamw(learning_rate=lowerCAmelCase__ , weight_decay=lowerCAmelCase__ , mask=lowerCAmelCase__ )
return tx, lr
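# A compact sketch of the warmup-then-linear-decay schedule assembled above,
# evaluated at a few steps; the step counts and peak learning rate are
# illustrative.
import optax
init_lr, peak_lr, warmup_steps, num_train_steps = 0.0, 3e-5, 100, 1000
warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=peak_lr, transition_steps=warmup_steps)
decay_fn = optax.linear_schedule(
    init_value=peak_lr, end_value=1e-7, transition_steps=num_train_steps - warmup_steps
)
schedule = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
print(schedule(0), schedule(100), schedule(1000))  # 0.0 -> peak lr -> ~1e-7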
| 366
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
snake_case__ : int = {'''configuration_plbart''': ['''PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PLBartConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : int = ['''PLBartTokenizer''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : int = [
'''PLBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PLBartForCausalLM''',
'''PLBartForConditionalGeneration''',
'''PLBartForSequenceClassification''',
'''PLBartModel''',
'''PLBartPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
snake_case__ : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
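# A hedged usage sketch for the classes exported above; "uclanlp/plbart-base"
# is PLBart's published base checkpoint, and the language codes follow its
# documentation.
from transformers import PLBartForConditionalGeneration, PLBartTokenizer
tokenizer = PLBartTokenizer.from_pretrained("uclanlp/plbart-base", src_lang="java", tgt_lang="en_XX")
model = PLBartForConditionalGeneration.from_pretrained("uclanlp/plbart-base")
inputs = tokenizer("public int maximum(int a, int b) { return Math.max(a, b); }", return_tensors="pt")
summary_ids = model.generate(**inputs, max_length=20)
print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True))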
| 314
| 0
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
snake_case__ : str = logging.get_logger(__name__)
class snake_case_( __SCREAMING_SNAKE_CASE ):
def __init__( self : List[str] , UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : float , **UpperCamelCase_ : Any ):
lowerCAmelCase : Optional[Any] = feature_size
lowerCAmelCase : Dict = sampling_rate
lowerCAmelCase : List[Any] = padding_value
lowerCAmelCase : Dict = kwargs.pop('''padding_side''' , '''right''' )
lowerCAmelCase : Tuple = kwargs.pop('''return_attention_mask''' , UpperCamelCase__ )
super().__init__(**UpperCamelCase__ )
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Union[
BatchFeature,
List[BatchFeature],
Dict[str, BatchFeature],
Dict[str, List[BatchFeature]],
List[Dict[str, BatchFeature]],
] , UpperCamelCase_ : Union[bool, str, PaddingStrategy] = True , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : bool = False , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[bool] = None , UpperCamelCase_ : Optional[Union[str, TensorType]] = None , ):
if isinstance(UpperCamelCase__ , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
lowerCAmelCase : Any = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has to be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
'''You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`'''
F''' to this method that includes {self.model_input_names[0]}, but you provided'''
F''' {list(processed_features.keys() )}''' )
lowerCAmelCase : Any = processed_features[self.model_input_names[0]]
lowerCAmelCase : str = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(UpperCamelCase__ ) == 0:
if return_attention_mask:
lowerCAmelCase : Any = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
lowerCAmelCase : int = required_input[0]
if isinstance(UpperCamelCase__ , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
lowerCAmelCase : int = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(UpperCamelCase__ ):
lowerCAmelCase : Any = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(UpperCamelCase__ ):
lowerCAmelCase : Tuple = '''tf'''
elif is_torch_tensor(UpperCamelCase__ ):
lowerCAmelCase : Tuple = '''pt'''
elif isinstance(UpperCamelCase__ , (int, float, list, tuple, np.ndarray) ):
lowerCAmelCase : Any = '''np'''
else:
raise ValueError(
F'''type of {first_element} unknown: {type(UpperCamelCase__ )}. '''
'''Should be one of a python, numpy, pytorch or tensorflow object.''' )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
lowerCAmelCase : Dict = to_numpy(UpperCamelCase__ )
else:
lowerCAmelCase : int = [to_numpy(UpperCamelCase__ ) for v in value]
# Convert padding_strategy in PaddingStrategy
lowerCAmelCase : Tuple = self._get_padding_strategies(padding=UpperCamelCase__ , max_length=UpperCamelCase__ )
lowerCAmelCase : Any = processed_features[self.model_input_names[0]]
lowerCAmelCase : Dict = len(UpperCamelCase__ )
if not all(len(UpperCamelCase__ ) == batch_size for v in processed_features.values() ):
raise ValueError('''Some items in the output dictionary have a different batch size than others.''' )
lowerCAmelCase : Union[str, Any] = []
for i in range(UpperCamelCase__ ):
lowerCAmelCase : Dict = {k: v[i] for k, v in processed_features.items()}
# truncation
lowerCAmelCase : int = self._truncate(
UpperCamelCase__ , max_length=UpperCamelCase__ , pad_to_multiple_of=UpperCamelCase__ , truncation=UpperCamelCase__ , )
truncated_inputs.append(UpperCamelCase__ )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
lowerCAmelCase : Optional[Any] = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
lowerCAmelCase : int = PaddingStrategy.MAX_LENGTH
lowerCAmelCase : int = {}
for i in range(UpperCamelCase__ ):
# padding
lowerCAmelCase : Optional[Any] = self._pad(
truncated_inputs[i] , max_length=UpperCamelCase__ , padding_strategy=UpperCamelCase__ , pad_to_multiple_of=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , )
for key, value in outputs.items():
if key not in batch_outputs:
lowerCAmelCase : str = []
if value.dtype is np.dtype(np.floataa ):
lowerCAmelCase : Dict = value.astype(np.floataa )
batch_outputs[key].append(UpperCamelCase__ )
return BatchFeature(UpperCamelCase__ , tensor_type=UpperCamelCase__ )
    def _pad(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference)
                    )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0)
                    )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return processed_features
    def _truncate(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
        truncation: Optional[bool] = None,
    ):
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length

        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features
    def _get_padding_strategies(self, padding=False, max_length=None):
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
                )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
            )

        return padding_strategy
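# Usage sketch (illustrative; assumes a concrete subclass such as
# Wav2Vec2FeatureExtractor, whose main input name is `input_values` -- the array
# lengths below are made up):
#
#     import numpy as np
#     from transformers import Wav2Vec2FeatureExtractor
#
#     extractor = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
#     features = [{"input_values": np.zeros(800)}, {"input_values": np.zeros(1000)}]
#     batch = extractor.pad(features, padding=True, return_tensors="np")
#     # batch["input_values"].shape == (2, 1000); the shorter example is right-padded with 0.0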
"""simple docstring"""
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = '''
import os
'''
IMPORT_IN_FUNCTION = '''
def foo():
import os
return False
'''
DEEPLY_NESTED_IMPORT = '''
def foo():
def bar():
if True:
import os
return False
return bar()
'''
TOP_LEVEL_TRY_IMPORT = '''
import os
try:
import bar
except ImportError:
raise ValueError()
'''
TRY_IMPORT_IN_FUNCTION = '''
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
'''
MULTIPLE_EXCEPTS_IMPORT = '''
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
'''
EXCEPT_AS_IMPORT = '''
import os
try:
import bar
except ImportError as e:
raise ValueError()
'''
GENERIC_EXCEPT_IMPORT = '''
import os
try:
import bar
except:
raise ValueError()
'''
MULTILINE_TRY_IMPORT = '''
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
'''
MULTILINE_BOTH_IMPORT = '''
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
'''
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
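# Behavior sketch (illustrative): `get_imports` reports only unconditional imports
# and skips anything wrapped in a `try`/`except` block; e.g. TOP_LEVEL_TRY_IMPORT
# contains both `import os` and a guarded `import bar`, yet only "os" is returned:
#
#     # get_imports(<file containing TOP_LEVEL_TRY_IMPORT>)  -> ["os"]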
"""simple docstring"""
from math import isclose, sqrt
def next_point(point_x: float, point_y: float, incoming_gradient: float):
    # normal_gradient = gradient of the normal at the reflection point
    normal_gradient = point_y / 4 / point_x
    s2 = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    c2 = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (s2 - c2 * incoming_gradient) / (c2 + s2 * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_minus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient
def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections
if __name__ == "__main__":
print(f"""{solution() = }""")
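# Sanity-check sketch (illustrative, relies only on the code above): every point
# produced by `next_point` should stay on the ellipse 4x^2 + y^2 = 100:
#
#     x, y, m = next_point(1.4, -9.6, (10.1 - -9.6) / (0.0 - 1.4))
#     assert isclose(4 * x * x + y * y, 100.0, rel_tol=1e-9)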
"""simple docstring"""
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    def __init__(
        self,
        initial_learning_rate: float,
        decay_schedule_fn: Callable,
        warmup_steps: int,
        power: float = 1.0,
        name: str = None,
    ):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )

    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
def create_optimizer(
    init_lr: float,
    num_train_steps: int,
    num_warmup_steps: int,
    min_lr_ratio: float = 0.0,
    adam_beta1: float = 0.9,
    adam_beta2: float = 0.999,
    adam_epsilon: float = 1e-8,
    adam_clipnorm: Optional[float] = None,
    adam_global_clipnorm: Optional[float] = None,
    weight_decay_rate: float = 0.0,
    power: float = 1.0,
    include_in_weight_decay: Optional[List[str]] = None,
):
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr,
        decay_steps=num_train_steps - num_warmup_steps,
        end_learning_rate=init_lr * min_lr_ratio,
        power=power,
    )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr,
            decay_schedule_fn=lr_schedule,
            warmup_steps=num_warmup_steps,
        )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule,
            weight_decay_rate=weight_decay_rate,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
            exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
            include_in_weight_decay=include_in_weight_decay,
        )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
        )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
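# Usage sketch (illustrative; the step counts and rates below are made up, and
# `model` is assumed to be any tf.keras model):
#
#     optimizer, lr_schedule = create_optimizer(
#         init_lr=5e-5, num_train_steps=10_000, num_warmup_steps=1_000, weight_decay_rate=0.01
#     )
#     model.compile(optimizer=optimizer, loss="sparse_categorical_crossentropy")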
class AdamWeightDecay(Adam):
    def __init__(
        self,
        learning_rate: Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001,
        beta_1: float = 0.9,
        beta_2: float = 0.999,
        epsilon: float = 1e-7,
        amsgrad: bool = False,
        weight_decay_rate: float = 0.0,
        include_in_weight_decay: Optional[List[str]] = None,
        exclude_from_weight_decay: Optional[List[str]] = None,
        name: str = "AdamWeightDecay",
        **kwargs,
    ):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay
    @classmethod
    def from_config(cls, config):
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate"
        )

    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"],
                use_locking=self._use_locking,
            )
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}

        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients

        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        if self.weight_decay_rate == 0:
            return False

        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True

        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True
class GradientAccumulator:
    def __init__(self):
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64),
                trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )
        return self._accum_steps.value()

    @property
    def gradients(self):
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient),
                        trainable=False,
                        synchronization=tf.VariableSynchronization.ON_READ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ]
            )
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")

        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)

        self._accum_steps.assign_add(1)

    def reset(self):
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
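# Accumulation-loop sketch (illustrative; `model`, `loss_fn`, `dataset` and
# `optimizer` are assumptions, and the summed gradients are typically rescaled by
# 1 / accum_steps before being applied):
#
#     accumulator = GradientAccumulator()
#     accum_steps = 4
#     for step, (x, y) in enumerate(dataset):
#         with tf.GradientTape() as tape:
#             loss = loss_fn(y, model(x, training=True))
#         accumulator(tape.gradient(loss, model.trainable_variables))
#         if (step + 1) % accum_steps == 0:
#             grads = [g / accum_steps if g is not None else g for g in accumulator.gradients]
#             optimizer.apply_gradients(zip(grads, model.trainable_variables))
#             accumulator.reset()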
"""simple docstring"""
import unittest
from transformers import DonutProcessor
DONUT_PRETRAINED_MODEL_NAME = "naver-clova-ix/donut-base"
class DonutProcessorTest(unittest.TestCase):
    def setUp(self):
        self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME)

    def test_token2json(self):
        expected_json = {
            "name": "John Doe",
            "age": "99",
            "city": "Atlanta",
            "state": "GA",
            "zip": "30301",
            "phone": "123-4567",
            "nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
        }

        sequence = (
            "<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
            "<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
            "<s_nicknames><s_nickname>Johnny</s_nickname>"
            "<sep/><s_nickname>JD</s_nickname></s_nicknames>"
        )
        actual_json = self.processor.token2json(sequence)

        self.assertDictEqual(actual_json, expected_json)
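# Behavior note (hedged): `token2json` inverts Donut's `json2token` serialization:
# `<s_key>value</s_key>` becomes {"key": "value"} and repeated groups separated by
# `<sep/>` become lists, e.g.
#
#     processor.token2json("<s_city>Atlanta</s_city>")  # -> {"city": "Atlanta"}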
"""simple docstring"""
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
"""simple docstring"""
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2

    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))

    return index


def solution(n: int = 1000) -> int:
    return fibonacci_digits_index(n)
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
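# Quick check (illustrative): F(12) = 144 is the first Fibonacci number with
# 3 digits, so fibonacci_digits_index(3) == 12; the 1000-digit case computed by
# `solution()` is Project Euler problem 25, whose answer is 4782.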
"""simple docstring"""
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys(state_dict, expert_idx=None):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict
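# Example mapping (illustrative; the layer index and expert index are made up):
#   "decoder.layers.0.moe_layer.experts.0.fc1.weight"  (expert_idx=3)
#       -> "decoder.layers.0.ffn.experts.expert_3.fc1.weight"
#   "decoder.layers.0.moe_layer.gate.wg.weight"
#       -> "decoder.layers.0.ffn.router.classifier.weight"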
def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--nllb_moe_checkpoint_path''',
default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000''',
type=str,
required=False,
help='''Path to a directory containing a folder per layer. Follows the original Google format.''',
)
parser.add_argument('''--dtype''', default='''float32''', type=str, required=False, help='''dtype of the saved model''')
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b''',
type=str,
required=False,
help='''Path to the output pytorch model.''',
)
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
        args.nllb_moe_checkpoint_path,
        args.pytorch_dump_folder_path,
        128,
        args.dtype,
    )

    config = NllbMoeConfig.from_pretrained(
        "facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
    )
    config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
    print("Done")
    model.save_pretrained(args.pytorch_dump_folder_path)
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
logger = logging.get_logger(__name__)

LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096''': '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'''
),
}
class LongformerConfig(PretrainedConfig):
    model_type = "longformer"

    def __init__(
        self,
        attention_window: Union[List[int], int] = 512,
        sep_token_id: int = 2,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        vocab_size: int = 30522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        onnx_export: bool = False,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
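# Usage sketch (illustrative; the per-layer window sizes are made up and
# `LongformerModel` is assumed to be imported from `transformers`):
#
#     config = LongformerConfig(attention_window=[256] * 12)
#     model = LongformerModel(config)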
class LongformerOnnxConfig(OnnxConfig):
    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        config.onnx_export = True
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        # needs to be >= 14 to support tril operator
        return max(super().default_onnx_opset, 14)
    def generate_dummy_inputs(
        self,
        preprocessor: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1

        return inputs
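# Export sketch (hedged; `transformers.onnx.export` is assumed to be available in
# the installed version, and the checkpoint/output path are illustrative):
#
#     from pathlib import Path
#     from transformers import AutoTokenizer, LongformerModel
#     from transformers.onnx import export
#
#     tokenizer = AutoTokenizer.from_pretrained("allenai/longformer-base-4096")
#     model = LongformerModel.from_pretrained("allenai/longformer-base-4096")
#     onnx_config = LongformerOnnxConfig(model.config)
#     export(tokenizer, model, onnx_config, onnx_config.default_onnx_opset, Path("model.onnx"))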
"""simple docstring"""
from math import sqrt
def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    status = True

    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"

    return status
def sieve_er(n: int) -> list:
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))

    ans = []  # this list will be returned.

    # actual sieve of erathostenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans


def get_prime_numbers(n: int) -> list:
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    ans = []

    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans
def prime_factorization(number: int) -> list:
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"

    ans = []  # this list will be returned by the function.

    # potential prime number factors.
    factor = 2
    quotient = number

    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans


def greatest_prime_factor(number: int) -> int:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"

    return ans


def smallest_prime_factor(number: int) -> int:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"

    return ans
def is_even(number: int) -> bool:
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare must been from type bool"

    return number % 2 == 0


def is_odd(number: int) -> bool:
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare must been from type bool"

    return number % 2 != 0
def goldbach(number: int) -> list:
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"

    ans = []  # this list will returned

    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    # run variable for while-loops.
    i = 0
    j = None

    # exit variable. for break up the loops
    loop = True

    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1

    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"

    return ans
def gcd(number1: int, number2: int) -> int:
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."

    rest = 0

    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must been from type int and positive"

    return number1
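# Worked example (illustrative): gcd(48, 18) iterates
#   48 % 18 = 12, 18 % 12 = 6, 12 % 6 = 0
# and returns 6.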
def kg_v(number1: int, number2: int) -> int:
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."

    ans = 1  # actual answer that will be return.

    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)

    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)

    count1 = 0
    count2 = 0

    done = []  # captured numbers in both 'primeFac1' and 'primeFac2'

    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)

                for _ in range(max(count1, count2)):
                    ans *= n

            else:
                count1 = prime_fac_1.count(n)

                for _ in range(count1):
                    ans *= n

            done.append(n)

    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)

            for _ in range(count2):
                ans *= n

            done.append(n)

    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"

    return ans
def get_prime(n: int) -> int:
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"

    index = 0
    ans = 2  # this variable holds the answer

    while index < n:
        index += 1
        ans += 1  # counts to the next number

        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1

    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must been a prime number and from type int"

    return ans


def get_primes_between(p_number_1: int, p_number_2: int) -> list:
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"

    number = p_number_1 + 1  # jump to the next number

    ans = []  # this list will be returned.

    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1

    while number < p_number_2:
        ans.append(number)

        number += 1

        # fetch the next prime number.
        while not is_prime(number):
            number += 1

    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"

    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans
def get_divisors(n: int) -> list:
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"

    ans = []  # will be returned.

    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function get_divisors(...)"

    return ans


def is_perfect_number(number: int) -> bool:
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"

    divisors = get_divisors(number)

    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function get_divisors(...)"

    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number
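# Example (illustrative): get_divisors(6) == [1, 2, 3, 6] and 1 + 2 + 3 == 6,
# so is_perfect_number(6) is True; the next perfect number is 28 = 1 + 2 + 4 + 7 + 14.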
def simplify_fraction(numerator: int, denominator: int) -> tuple:
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"

    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def factorial(n: int) -> int:
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"

    ans = 1  # this will be return.

    for factor in range(1, n + 1):
        ans *= factor

    return ans


def fib(n: int) -> int:
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"

    tmp = 0
    fib1 = 1
    ans = 1  # this will be return

    for _ in range(n - 1):
        tmp = ans
        ans += fib1
        fib1 = tmp

    return ans
"""simple docstring"""
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeq2SeqDataset, Seq2SeqDataset
BERT_BASE_CASED = "bert-base-cased"
PEGASUS_XSUM = "google/pegasus-xsum"
ARTICLES = [" Sam ate lunch today.", "Sams lunch ingredients."]
SUMMARIES = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"
MARIAN_TINY = "sshleifer/tiny-marian-en-de"
def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


def make_test_data_dir(tmp_dir):
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir, f"{split}.source"), ARTICLES)
        _dump_articles(os.path.join(tmp_dir, f"{split}.target"), SUMMARIES)
    return tmp_dir
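# Layout sketch (for orientation): `make_test_data_dir` writes the six files a
# Seq2SeqDataset expects:
#     <tmp_dir>/train.source  <tmp_dir>/train.target
#     <tmp_dir>/val.source    <tmp_dir>/val.target
#     <tmp_dir>/test.source   <tmp_dir>/test.target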
class TestAll(TestCasePlus):
    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ],
    )
    @slow
    def test_seq2seq_dataset_truncation(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        max_src_len = 4
        max_tgt_len = 8
        assert max_len_target > max_src_len  # Will be truncated
        assert max_len_source > max_src_len  # Will be truncated
        src_lang, tgt_lang = "ro_RO", "de_DE"  # ignored for all but mbart, but never causes error.
        train_dataset = Seq2SeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path="train",
            max_source_length=max_src_len,
            max_target_length=max_tgt_len,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert isinstance(batch, dict)
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_src_len
            # show that targets are the same len
            assert batch["labels"].shape[1] == max_tgt_len
            if tok_name != MBART_TINY:
                continue
            # check language codes in correct place
            batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], tokenizer.pad_token_id)
            assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
            assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
            break  # No need to test every batch
    @parameterized.expand([BART_TINY, BERT_BASE_CASED])
    def test_legacy_dataset_truncation(self, tok):
        tokenizer = AutoTokenizer.from_pretrained(tok)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        trunc_target = 4
        train_dataset = LegacySeq2SeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path="train",
            max_source_length=20,
            max_target_length=trunc_target,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_len_source
            assert 20 >= batch["input_ids"].shape[1]  # trimmed significantly
            # show that targets were truncated
            assert batch["labels"].shape[1] == trunc_target  # Truncated
            assert max_len_target > trunc_target  # Truncated
            break  # No need to test every batch
    def test_pack_dataset(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25")

        tmp_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        orig_examples = tmp_dir.joinpath("train.source").open().readlines()
        save_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        pack_data_dir(tokenizer, tmp_dir, 128, save_dir)
        orig_paths = {x.name for x in tmp_dir.iterdir()}
        new_paths = {x.name for x in save_dir.iterdir()}
        packed_examples = save_dir.joinpath("train.source").open().readlines()
        # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
        # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
        assert len(packed_examples) < len(orig_examples)
        assert len(packed_examples) == 1
        assert len(packed_examples[0]) == sum(len(x) for x in orig_examples)
        assert orig_paths == new_paths
    @pytest.mark.skipif(not FAIRSEQ_AVAILABLE, reason="This test requires fairseq")
    def test_dynamic_batch_size(self):
        if not FAIRSEQ_AVAILABLE:
            return
        ds, max_tokens, tokenizer = self._get_dataset(max_len=64)
        required_batch_size_multiple = 64
        batch_sampler = ds.make_dynamic_sampler(max_tokens, required_batch_size_multiple=required_batch_size_multiple)
        batch_sizes = [len(x) for x in batch_sampler]
        assert len(set(batch_sizes)) > 1  # it's not dynamic batch size if every batch is the same length
        assert sum(batch_sizes) == len(ds)  # no dropped or added examples
        data_loader = DataLoader(ds, batch_sampler=batch_sampler, collate_fn=ds.collate_fn, num_workers=2)
        failures = []
        num_src_per_batch = []
        for batch in data_loader:
            src_shape = batch["input_ids"].shape
            bs = src_shape[0]
            assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            num_src_tokens = np.prod(batch["input_ids"].shape)
            num_src_per_batch.append(num_src_tokens)
            if num_src_tokens > (max_tokens * 1.1):
                failures.append(num_src_tokens)
        assert num_src_per_batch[0] == max(num_src_per_batch)
        if failures:
            raise AssertionError(f"too many tokens in {len(failures)} batches")
def lowerCamelCase__ ( self : int ):
        ds, _, tokenizer = self._get_dataset(max_len=5_1_2 )
        bs = 2
        sortish_sampler = ds.make_sortish_sampler(bs , shuffle=False )
        naive_dl = DataLoader(ds , batch_size=bs , collate_fn=ds.collate_fn , num_workers=2 )
        sortish_dl = DataLoader(ds , batch_size=bs , collate_fn=ds.collate_fn , num_workers=2 , sampler=sortish_sampler )
        pad = tokenizer.pad_token_id
        def count_pad_tokens(data_loader , k='''input_ids''' ):
            return [batch[k].eq(pad ).sum().item() for batch in data_loader]
        assert sum(count_pad_tokens(sortish_dl , k='''labels''' ) ) < sum(count_pad_tokens(naive_dl , k='''labels''' ) )
        assert sum(count_pad_tokens(sortish_dl ) ) < sum(count_pad_tokens(naive_dl ) )
        assert len(sortish_dl ) == len(naive_dl )
    def _get_dataset( self , n_obs=1_0_0_0 , max_len=1_2_8 ):
        if os.getenv('''USE_REAL_DATA''' , False ):
            data_dir = '''examples/seq2seq/wmt_en_ro'''
            max_tokens = max_len * 2 * 6_4
            if not Path(data_dir ).joinpath('''train.len''' ).exists():
                save_len_file(MARIAN_TINY , data_dir )
        else:
            data_dir = '''examples/seq2seq/test_data/wmt_en_ro'''
            max_tokens = max_len * 4
            save_len_file(MARIAN_TINY , data_dir )
        tokenizer = AutoTokenizer.from_pretrained(MARIAN_TINY )
        ds = SeqaSeqDataset(
            tokenizer , data_dir=data_dir , type_path='''train''' , max_source_length=max_len , max_target_length=max_len , n_obs=n_obs , )
        return ds, max_tokens, tokenizer
def lowerCamelCase__ ( self : int ):
        ds, max_tokens, tokenizer = self._get_dataset()
        ids1 = set(DistributedSortishSampler(ds , 2_5_6 , num_replicas=2 , rank=0 , add_extra_examples=False ) )
        ids2 = set(DistributedSortishSampler(ds , 2_5_6 , num_replicas=2 , rank=1 , add_extra_examples=False ) )
        assert ids1.intersection(ids2 ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
    def lowerCamelCase__ ( self : int , tok_name : str ):
        tokenizer = AutoTokenizer.from_pretrained(tok_name , use_fast=False )
        if tok_name == MBART_TINY:
            train_dataset = SeqaSeqDataset(
                tokenizer , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='''train''' , max_source_length=4 , max_target_length=8 , src_lang='''EN''' , tgt_lang='''FR''' , )
            kwargs = train_dataset.dataset_kwargs
            assert "src_lang" in kwargs and "tgt_lang" in kwargs
        else:
            train_dataset = SeqaSeqDataset(
                tokenizer , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='''train''' , max_source_length=4 , max_target_length=8 , )
            kwargs = train_dataset.dataset_kwargs
            assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
            assert len(kwargs ) == 1 if tok_name == BART_TINY else len(kwargs ) == 0
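# Hedged sketch (added, not part of the original test file) of the fixture layout
# the tests above assume: `make_test_data_dir` writes line-aligned parallel files
# per split, roughly
#   data_dir/
#     train.source   # one article per line (ARTICLES)
#     train.target   # one summary per line (SUMMARIES)
#     val.source / val.target, test.source / test.target
# so max_source_length / max_target_length control per-line truncation at load time.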
| 350
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case__ : Any = logging.get_logger(__name__)
snake_case__ : Any = {
'''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class snake_case_( a__ ):
__UpperCamelCase = '''vit_msn'''
    def __init__( self , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-06 , image_size=2_2_4 , patch_size=1_6 , num_channels=3 , qkv_bias=True , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
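# Hedged usage sketch (illustrative; assumes the class above is exported under its
# real upstream name ViTMSNConfig):
#
#     config = ViTMSNConfig(image_size=1_9_2)
#     print(config.hidden_size, config.num_hidden_layers, config.image_size)  # 768 12 192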
| 314
| 0
|
"""simple docstring"""
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class snake_case_( _SCREAMING_SNAKE_CASE ):
    def __init__( self , *args , eval_examples=None , post_process_function=None , **kwargs ):
        super().__init__(*args , **kwargs )
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
    def evaluate( self , eval_dataset=None , eval_examples=None , ignore_keys=None , metric_key_prefix : str = "eval" ):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset )
        eval_examples = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=ignore_keys , metric_key_prefix=metric_key_prefix , )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
            start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
        output.metrics.update(
            speed_metrics(
                metric_key_prefix , start_time , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples , eval_dataset , output.predictions )
            metrics = self.compute_metrics(eval_preds )
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys() ):
                if not key.startswith(F'''{metric_key_prefix}_''' ):
                    metrics[F'''{metric_key_prefix}_{key}'''] = metrics.pop(key )
            metrics.update(output.metrics )
        else:
            metrics = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics )
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report() )
        self.control = self.callback_handler.on_evaluate(self.args , self.state , self.control , metrics )
        return metrics
    def predict( self , predict_dataset , predict_examples , ignore_keys=None , metric_key_prefix : str = "test" ):
        predict_dataloader = self.get_test_dataloader(predict_dataset )
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=ignore_keys , metric_key_prefix=metric_key_prefix , )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
            start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
        output.metrics.update(
            speed_metrics(
                metric_key_prefix , start_time , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples , predict_dataset , output.predictions , '''predict''' )
        metrics = self.compute_metrics(predictions )
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys() ):
            if not key.startswith(F'''{metric_key_prefix}_''' ):
                metrics[F'''{metric_key_prefix}_{key}'''] = metrics.pop(key )
        metrics.update(output.metrics )
        return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=metrics )
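# Hedged usage sketch (helper names illustrative; upstream this class is exported as
# QuestionAnsweringTrainer): it is typically wired to a squad-style post-processing step.
#
#     trainer = QuestionAnsweringTrainer(
#         model=model,
#         args=training_args,
#         eval_dataset=eval_features,            # tokenized features
#         eval_examples=eval_examples,           # raw examples with gold answers
#         post_process_function=post_processing_function,  # logits -> answer strings
#         compute_metrics=compute_metrics,       # e.g. squad EM/F1
#     )
#     metrics = trainer.evaluate()               # runs eval_loop + post-processing as above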
| 351
|
"""simple docstring"""
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
snake_case__ : Optional[Any] = logging.getLogger(__name__)
def _snake_case ( folder_path : str ):
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        '''repo_id''': str(repo ),
        '''repo_sha''': str(repo.head.object.hexsha ),
        '''repo_branch''': str(repo.active_branch ),
    }
    with open(os.path.join(folder_path , '''git_log.json''' ) , '''w''' ) as f:
        json.dump(repo_infos , f , indent=4 )
def _snake_case ( _snake_case : Any ):
if params.n_gpu <= 0:
lowerCAmelCase : Dict = 0
lowerCAmelCase : Optional[int] = -1
lowerCAmelCase : Dict = True
lowerCAmelCase : int = False
return
assert torch.cuda.is_available()
logger.info('''Initializing GPUs''' )
if params.n_gpu > 1:
assert params.local_rank != -1
lowerCAmelCase : str = int(os.environ['''WORLD_SIZE'''] )
lowerCAmelCase : Optional[int] = int(os.environ['''N_GPU_NODE'''] )
lowerCAmelCase : int = int(os.environ['''RANK'''] )
# number of nodes / node ID
lowerCAmelCase : Dict = params.world_size // params.n_gpu_per_node
lowerCAmelCase : int = params.global_rank // params.n_gpu_per_node
lowerCAmelCase : str = True
assert params.n_nodes == int(os.environ['''N_NODES'''] )
assert params.node_id == int(os.environ['''NODE_RANK'''] )
# local job (single GPU)
else:
assert params.local_rank == -1
lowerCAmelCase : List[Any] = 1
lowerCAmelCase : List[Any] = 0
lowerCAmelCase : Optional[int] = 0
lowerCAmelCase : Any = 0
lowerCAmelCase : Any = 1
lowerCAmelCase : Any = 1
lowerCAmelCase : Dict = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
lowerCAmelCase : Tuple = params.node_id == 0 and params.local_rank == 0
lowerCAmelCase : List[Any] = params.n_nodes > 1
# summary
    PREFIX = f'''--- Global rank: {params.global_rank} - '''
logger.info(PREFIX + '''Number of nodes: %i''' % params.n_nodes )
logger.info(PREFIX + '''Node ID : %i''' % params.node_id )
logger.info(PREFIX + '''Local rank : %i''' % params.local_rank )
logger.info(PREFIX + '''World size : %i''' % params.world_size )
logger.info(PREFIX + '''GPUs per node : %i''' % params.n_gpu_per_node )
logger.info(PREFIX + '''Master : %s''' % str(params.is_master ) )
logger.info(PREFIX + '''Multi-node : %s''' % str(params.multi_node ) )
logger.info(PREFIX + '''Multi-GPU : %s''' % str(params.multi_gpu ) )
logger.info(PREFIX + '''Hostname : %s''' % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info('''Initializing PyTorch distributed''' )
torch.distributed.init_process_group(
init_method='''env://''' , backend='''nccl''' , )
def _snake_case ( _snake_case : Optional[int] ):
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
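# Hedged usage sketch: the seeding helper directly above is typically called once at
# startup with the parsed argparse namespace (attribute names taken from its body).
# Note that the three `_snake_case` definitions in this file shadow one another, so
# at module level only this last, seeding helper survives under that name.
#
#     import argparse
#     args = argparse.Namespace(seed=5_6, n_gpu=0)
#     _snake_case(args)  # seeds numpy and torch; also all CUDA devices when n_gpu > 0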
| 314
| 0
|
"""simple docstring"""
import tensorflow as tf
from ...tf_utils import shape_list
class snake_case_( tf.keras.layers.Layer ):
    def __init__( self , vocab_size , d_embed , d_proj , cutoffs , div_val=1 , keep_order=False , **kwargs ):
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs ) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order
        self.out_layers = []
        self.out_projs = []
    def build( self , input_shape ):
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed) , initializer='''zeros''' , trainable=True , name='''cluster_weight''' )
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,) , initializer='''zeros''' , trainable=True , name='''cluster_bias''' )
        if self.div_val == 1:
            for i in range(len(self.cutoffs ) ):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj) , initializer='''zeros''' , trainable=True , name=F'''out_projs_._{i}''' , )
                    self.out_projs.append(weight )
                else:
                    self.out_projs.append(None )
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed) , initializer='''zeros''' , trainable=True , name=F'''out_layers_._{i}_._weight''' , )
                bias = self.add_weight(
                    shape=(self.vocab_size,) , initializer='''zeros''' , trainable=True , name=F'''out_layers_._{i}_._bias''' , )
                self.out_layers.append((weight, bias) )
        else:
            for i in range(len(self.cutoffs ) ):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)
                weight = self.add_weight(
                    shape=(d_emb_i, self.d_proj) , initializer='''zeros''' , trainable=True , name=F'''out_projs_._{i}''' )
                self.out_projs.append(weight )
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i) , initializer='''zeros''' , trainable=True , name=F'''out_layers_._{i}_._weight''' , )
                bias = self.add_weight(
                    shape=(r_idx - l_idx,) , initializer='''zeros''' , trainable=True , name=F'''out_layers_._{i}_._bias''' , )
                self.out_layers.append((weight, bias) )
        super().build(input_shape )
@staticmethod
def lowerCamelCase__ ( UpperCamelCase_ : Any , UpperCamelCase_ : Dict , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : int=None ):
lowerCAmelCase : Dict = x
if proj is not None:
lowerCAmelCase : Dict = tf.einsum('''ibd,ed->ibe''' , snake_case_ , snake_case_ )
return tf.einsum('''ibd,nd->ibn''' , snake_case_ , snake_case_ ) + b
@staticmethod
def lowerCamelCase__ ( UpperCamelCase_ : Any , UpperCamelCase_ : Any ):
lowerCAmelCase : Union[str, Any] = shape_list(snake_case_ )
lowerCAmelCase : List[str] = tf.range(lp_size[0] , dtype=target.dtype )
lowerCAmelCase : Optional[Any] = tf.stack([r, target] , 1 )
return tf.gather_nd(snake_case_ , snake_case_ )
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[Any]=True , UpperCamelCase_ : Tuple=False ):
lowerCAmelCase : int = 0
if self.n_clusters == 0:
lowerCAmelCase : Dict = self._logit(snake_case_ , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
if target is not None:
lowerCAmelCase : str = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=snake_case_ , logits=snake_case_ )
lowerCAmelCase : int = tf.nn.log_softmax(snake_case_ , axis=-1 )
else:
lowerCAmelCase : str = shape_list(snake_case_ )
lowerCAmelCase : Union[str, Any] = []
lowerCAmelCase : List[Any] = tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
lowerCAmelCase : str = (target >= l_idx) & (target < r_idx)
lowerCAmelCase : str = tf.where(snake_case_ )
lowerCAmelCase : Dict = tf.boolean_mask(snake_case_ , snake_case_ ) - l_idx
if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]
if i == 0:
lowerCAmelCase : Dict = tf.concat([cur_W, self.cluster_weight] , 0 )
lowerCAmelCase : int = tf.concat([cur_b, self.cluster_bias] , 0 )
lowerCAmelCase : Union[str, Any] = self._logit(snake_case_ , snake_case_ , snake_case_ , self.out_projs[0] )
lowerCAmelCase : Any = tf.nn.log_softmax(snake_case_ )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
lowerCAmelCase : Dict = tf.boolean_mask(snake_case_ , snake_case_ )
lowerCAmelCase : List[str] = self._gather_logprob(snake_case_ , snake_case_ )
else:
lowerCAmelCase : Union[str, Any] = self._logit(snake_case_ , snake_case_ , snake_case_ , self.out_projs[i] )
lowerCAmelCase : List[Any] = tf.nn.log_softmax(snake_case_ )
lowerCAmelCase : str = self.cutoffs[0] + i - 1 # No probability for the head cluster
lowerCAmelCase : List[Any] = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(snake_case_ )
if target is not None:
lowerCAmelCase : List[str] = tf.boolean_mask(snake_case_ , snake_case_ )
lowerCAmelCase : Union[str, Any] = tf.boolean_mask(snake_case_ , snake_case_ )
lowerCAmelCase : Optional[Any] = self._gather_logprob(snake_case_ , snake_case_ )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(snake_case_ , -cur_logprob , shape_list(snake_case_ ) )
lowerCAmelCase : Union[str, Any] = tf.concat(snake_case_ , axis=-1 )
if target is not None:
if return_mean:
lowerCAmelCase : Optional[Any] = tf.reduce_mean(snake_case_ )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(snake_case_ )
# Log the loss as a metric (we could log arbitrary metrics,
# including different metrics for training and inference.
self.add_metric(snake_case_ , name=self.name , aggregation='''mean''' if return_mean else '''''' )
return out
| 352
|
"""simple docstring"""
def sylvester( number : int ) -> int:
    assert isinstance(number , int ), f'''The input value of [n={number}] is not an integer'''
    if number == 1:
        return 2
    elif number < 1:
        msg = f'''The input value of [n={number}] has to be > 0'''
        raise ValueError(msg )
    else:
        num = sylvester(number - 1 )
        lower = num - 1
        upper = num
        return lower * upper + 1
if __name__ == "__main__":
print(f"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
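    # Hedged sanity check (added example): each Sylvester number is the product of
    # all previous terms plus one, giving 2, 3, 7, 43, 1807, ...
    assert [sylvester(n ) for n in range(1 , 6 )] == [2, 3, 7, 43, 1807]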
| 314
| 0
|
"""simple docstring"""
def _snake_case ( input_num : int ) -> int:
    if not isinstance(input_num , int ):
        raise ValueError('''Input must be an integer''' )
    if input_num <= 0:
        raise ValueError('''Input must be positive''' )
    return sum(
        divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
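    # Hedged examples (added): the proper divisors of 28 are 1, 2, 4, 7 and 14, which
    # sum back to 28 (a perfect number); for 12 they are 1, 2, 3, 4 and 6, summing to 16.
    assert _snake_case(28 ) == 28
    assert _snake_case(12 ) == 16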
| 353
|
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def get_swin_config(model_name ):
    config = SwinConfig(image_size=192 )
    if "base" in model_name:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError('''Model not supported, only supports base and large variants''' )
    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    return config
def rename_key(name ):
    if "encoder.mask_token" in name:
        name = name.replace('''encoder.mask_token''' , '''embeddings.mask_token''' )
    if "encoder.patch_embed.proj" in name:
        name = name.replace('''encoder.patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
    if "encoder.patch_embed.norm" in name:
        name = name.replace('''encoder.patch_embed.norm''' , '''embeddings.norm''' )
    if "attn.proj" in name:
        name = name.replace('''attn.proj''' , '''attention.output.dense''' )
    if "attn" in name:
        name = name.replace('''attn''' , '''attention.self''' )
    if "norm1" in name:
        name = name.replace('''norm1''' , '''layernorm_before''' )
    if "norm2" in name:
        name = name.replace('''norm2''' , '''layernorm_after''' )
    if "mlp.fc1" in name:
        name = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
    if "mlp.fc2" in name:
        name = name.replace('''mlp.fc2''' , '''output.dense''' )
    if name == "encoder.norm.weight":
        name = '''layernorm.weight'''
    if name == "encoder.norm.bias":
        name = '''layernorm.bias'''
    if "decoder" in name:
        pass
    else:
        name = '''swin.''' + name
    return name
def convert_state_dict(orig_state_dict , model ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            key_split = key.split('''.''' )
            layer_num = int(key_split[2] )
            block_num = int(key_split[4] )
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[F'''swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight'''] = val[:dim, :]
                orig_state_dict[F'''swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight'''] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[F'''swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight'''] = val[-dim:, :]
            else:
                orig_state_dict[F'''swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias'''] = val[
                    :dim
                ]
                orig_state_dict[F'''swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias'''] = val[
                    dim : dim * 2
                ]
                orig_state_dict[F'''swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias'''] = val[
                    -dim:
                ]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
def convert_swin_checkpoint(model_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub ):
    state_dict = torch.load(checkpoint_path , map_location='''cpu''' )['''model''']
    config = get_swin_config(model_name )
    model = SwinForMaskedImageModeling(config )
    model.eval()
    new_state_dict = convert_state_dict(state_dict , model )
    model.load_state_dict(new_state_dict )
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    image_processor = ViTImageProcessor(size={'''height''': 192, '''width''': 192} )
    image = Image.open(requests.get(url , stream=True ).raw )
    inputs = image_processor(images=image , return_tensors='''pt''' )
    with torch.no_grad():
        outputs = model(**inputs ).logits
    print(outputs.keys() )
    print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path )
        print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
        image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(f'''Pushing model and image processor for {model_name} to hub''' )
        model.push_to_hub(f'''microsoft/{model_name}''' )
        image_processor.push_to_hub(f'''microsoft/{model_name}''' )
if __name__ == "__main__":
snake_case__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''swin-base-simmim-window6-192''',
type=str,
choices=['''swin-base-simmim-window6-192''', '''swin-large-simmim-window12-192'''],
help='''Name of the Swin SimMIM model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''',
default='''/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth''',
type=str,
help='''Path to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
snake_case__ : Dict = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
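# Hedged example invocation (paths illustrative, not taken from this script):
#
#   python convert_swin_simmim.py \
#       --model_name swin-base-simmim-window6-192 \
#       --checkpoint_path ./simmim_pretrain__swin_base__img192_window6__100ep.pth \
#       --pytorch_dump_folder_path ./swin-simmim-converted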
| 314
| 0
|
"""simple docstring"""
import os
def solution():
    script_directory = os.path.dirname(os.path.realpath(__file__ ) )
    triangle_filepath = os.path.join(script_directory , '''triangle.txt''' )
    with open(triangle_filepath ) as f:
        triangle = f.readlines()
    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(''' ''' ):
            numbers_from_line.append(int(number ) )
        a.append(numbers_from_line )
    for i in range(1 , len(a ) ):
        for j in range(len(a[i] ) ):
            number1 = a[i - 1][j] if j != len(a[i - 1] ) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1 , number2 )
    return max(a[-1] )
if __name__ == "__main__":
print(solution())
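# The in-place update in solution() is the classic triangle dynamic program,
#   a[i][j] += max(a[i - 1][j - 1], a[i - 1][j])  (out-of-range neighbours count as 0),
# so max(a[-1]) is the best root-to-base path sum. Hedged mini-example: for the
# triangle [[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]] the best path is 3 + 7 + 4 + 9 = 23.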
| 354
|
"""simple docstring"""
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
snake_case__ : List[str] = logging.get_logger(__name__) # pylint: disable=invalid-name
def _preprocess_image(image: Union[List, PIL.Image.Image, torch.Tensor] ):
    warnings.warn(
        '''The preprocess method is deprecated and will be removed in a future version. Please'''
        ''' use VaeImageProcessor.preprocess instead''' , FutureWarning , )
    if isinstance(image , torch.Tensor ):
        return image
    elif isinstance(image , PIL.Image.Image ):
        image = [image]
    if isinstance(image[0] , PIL.Image.Image ):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8
        image = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image]
        image = np.concatenate(image , axis=0 )
        image = np.array(image ).astype(np.float32 ) / 255.0
        image = image.transpose(0 , 3 , 1 , 2 )
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image )
    elif isinstance(image[0] , torch.Tensor ):
        image = torch.cat(image , dim=0 )
    return image
def _preprocess_mask(mask: Union[List, PIL.Image.Image, torch.Tensor] ):
    if isinstance(mask , torch.Tensor ):
        return mask
    elif isinstance(mask , PIL.Image.Image ):
        mask = [mask]
    if isinstance(mask[0] , PIL.Image.Image ):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert('''L''' ).resize((w, h) , resample=PIL_INTERPOLATION['''nearest'''] ) )[None, :] for m in mask]
        mask = np.concatenate(mask , axis=0 )
        mask = mask.astype(np.float32 ) / 255.0
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask )
    elif isinstance(mask[0] , torch.Tensor ):
        mask = torch.cat(mask , dim=0 )
    return mask
class snake_case_( a__ ):
__UpperCamelCase = 42
__UpperCamelCase = 42
    def __init__( self , unet , scheduler ):
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
@torch.no_grad()
    def __call__( self , image: Union[torch.Tensor, PIL.Image.Image] , mask_image: Union[torch.Tensor, PIL.Image.Image] , num_inference_steps: int = 2_5_0 , eta: float = 0.0 , jump_length: int = 1_0 , jump_n_sample: int = 1_0 , generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , ):
        original_image = image
        original_image = _preprocess_image(original_image )
        original_image = original_image.to(device=self.device , dtype=self.unet.dtype )
        mask_image = _preprocess_mask(mask_image )
        mask_image = mask_image.to(device=self.device , dtype=self.unet.dtype )
        batch_size = original_image.shape[0]
        # sample gaussian noise to begin the loop
        if isinstance(generator , list ) and len(generator ) != batch_size:
            raise ValueError(
                F'''You have passed a list of generators of length {len(generator )}, but requested an effective batch'''
                F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
        image_shape = original_image.shape
        image = randn_tensor(image_shape , generator=generator , device=self.device , dtype=self.unet.dtype )
        # set step values
        self.scheduler.set_timesteps(num_inference_steps , jump_length , jump_n_sample , self.device )
        self.scheduler.eta = eta
        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator , list ) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image , t ).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output , t , image , original_image , mask_image , generator ).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image , t_last , generator )
            t_last = t
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
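# Hedged usage sketch (model id illustrative; `snake_case_` is the obfuscated class
# name used above, upstream it is RePaintPipeline):
#
#     from diffusers import UNetaDModel, RePaintScheduler
#     unet = UNetaDModel.from_pretrained("google/ddpm-ema-celebahq-256")
#     pipe = snake_case_(unet=unet, scheduler=RePaintScheduler())
#     out = pipe(image=original_pil, mask_image=mask_pil, num_inference_steps=2_5_0)
#     out.images[0].save("inpainted.png")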
| 314
| 0
|
"""simple docstring"""
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = '''<<<<<<< This should probably be modified because it mentions: '''
HIGHLIGHT_MESSAGE_POST = '''=======\n>>>>>>>\n'''
TO_HIGHLIGHT = [
'''TextEncoderConfig''',
'''ByteTextEncoder''',
'''SubwordTextEncoder''',
'''encoder_config''',
'''maybe_build_from_corpus''',
'''manual_dir''',
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(R'''tfds\.core''', R'''datasets'''),
(R'''tf\.io\.gfile\.GFile''', R'''open'''),
(R'''tf\.([\w\d]+)''', R'''datasets.Value(\'\1\')'''),
(R'''tfds\.features\.Text\(\)''', R'''datasets.Value(\'string\')'''),
(R'''tfds\.features\.Text\(''', R'''datasets.Value(\'string\'),'''),
(R'''features\s*=\s*tfds.features.FeaturesDict\(''', R'''features=datasets.Features('''),
(R'''tfds\.features\.FeaturesDict\(''', R'''dict('''),
(R'''The TensorFlow Datasets Authors''', R'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''),
(R'''tfds\.''', R'''datasets.'''),
(R'''dl_manager\.manual_dir''', R'''self.config.data_dir'''),
(R'''self\.builder_config''', R'''self.config'''),
]
def convert_command_factory(args ):
    return ConvertCommand(args.tfds_path , args.datasets_directory )
class ConvertCommand( BaseDatasetsCLICommand ):
@staticmethod
    def register_subcommand( parser : ArgumentParser ):
        train_parser = parser.add_parser(
            '''convert''' , help='''Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.''' , )
        train_parser.add_argument(
            '''--tfds_path''' , type=str , required=True , help='''Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.''' , )
        train_parser.add_argument(
            '''--datasets_directory''' , type=str , required=True , help='''Path to the HuggingFace Datasets folder.''' )
        train_parser.set_defaults(func=convert_command_factory )
    def __init__( self , tfds_path : str , datasets_directory : str , *args ):
        self._logger = get_logger('''datasets-cli/converting''' )
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory
    def run( self ):
        if os.path.isdir(self._tfds_path ):
            abs_tfds_path = os.path.abspath(self._tfds_path )
        elif os.path.isfile(self._tfds_path ):
            abs_tfds_path = os.path.dirname(self._tfds_path )
        else:
            raise ValueError('''--tfds_path is neither a directory nor a file. Please check path.''' )
        abs_datasets_path = os.path.abspath(self._datasets_directory )
        self._logger.info(F'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''' )
        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}
        if os.path.isdir(self._tfds_path ):
            file_names = os.listdir(abs_tfds_path )
        else:
            file_names = [os.path.basename(self._tfds_path )]
        for f_name in file_names:
            self._logger.info(F'''Looking at file {f_name}''' )
            input_file = os.path.join(abs_tfds_path , f_name )
            output_file = os.path.join(abs_datasets_path , f_name )
            if not os.path.isfile(input_file ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info('''Skipping file''' )
                continue
            with open(input_file , encoding='''utf-8''' ) as f:
                lines = f.readlines()
            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line
                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = '''import datasets\n'''
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ''''''
                    continue
                elif "from absl import logging" in out_line:
                    out_line = '''from datasets import logging\n'''
                elif "getLogger" in out_line:
                    out_line = out_line.replace('''getLogger''' , '''get_logger''' )
                elif any(expression in out_line for expression in TO_HIGHLIGHT ):
                    needs_manual_update = True
                    to_remove = list(filter(lambda e : e in out_line , TO_HIGHLIGHT ) )
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove ) + '''\n''' )
                    out_lines.append(out_line )
                    out_lines.append(HIGHLIGHT_MESSAGE_POST )
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern , replacement , out_line )
                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''' , out_line )
                    tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(''',''' ) )
                    out_line = '''from . import ''' + match.group(1 )
                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(F'''Error converting {out_line.strip()}''' )
                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line )
            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dataset_name = f_name.replace('''.py''' , '''''' )
                output_dir = os.path.join(abs_datasets_path , dataset_name )
                output_file = os.path.join(output_dir , f_name )
                os.makedirs(output_dir , exist_ok=True )
                self._logger.info(F'''Adding directory {output_dir}''' )
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file )
            if needs_manual_update:
                with_manual_update.append(output_file )
            with open(output_file , '''w''' , encoding='''utf-8''' ) as f:
                f.writelines(out_lines )
            self._logger.info(F'''Converted in {output_file}''' )
        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file )
                dest_folder = imports_to_builder_map[f_name.replace('''.py''' , '''''' )]
                self._logger.info(F'''Moving {dest_folder} to {utils_file}''' )
                shutil.copy(utils_file , dest_folder )
            except KeyError:
                self._logger.error(F'''Cannot find destination folder for {utils_file}. Please copy manually.''' )
        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    F'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''' )
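# Hedged CLI example (dataset path illustrative): once registered through
# register_subcommand above, the command is invoked as
#
#     datasets-cli convert --tfds_path ./tfds/text/squad.py --datasets_directory ./hf_datasets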
| 355
|
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class snake_case_( unittest.TestCase ):
def lowerCamelCase__ ( self : int ):
lowerCAmelCase : str = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCAmelCase : str = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(UpperCamelCase_ )
lowerCAmelCase : int = -1
lowerCAmelCase : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCamelCase_ )
lowerCAmelCase : List[Any] = model.generate(UpperCamelCase_ , max_new_tokens=1_0 , do_sample=UpperCamelCase_ )
lowerCAmelCase : Any = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
lowerCAmelCase : str = TextStreamer(UpperCamelCase_ )
model.generate(UpperCamelCase_ , max_new_tokens=1_0 , do_sample=UpperCamelCase_ , streamer=UpperCamelCase_ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowerCAmelCase : str = cs.out[:-1]
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase : int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCAmelCase : Any = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(UpperCamelCase_ )
lowerCAmelCase : Any = -1
lowerCAmelCase : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCamelCase_ )
lowerCAmelCase : Any = model.generate(UpperCamelCase_ , max_new_tokens=1_0 , do_sample=UpperCamelCase_ )
lowerCAmelCase : Tuple = tokenizer.decode(greedy_ids[0] )
lowerCAmelCase : Dict = TextIteratorStreamer(UpperCamelCase_ )
lowerCAmelCase : str = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer}
lowerCAmelCase : str = Thread(target=model.generate , kwargs=UpperCamelCase_ )
thread.start()
lowerCAmelCase : Optional[Any] = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase__ ( self : str ):
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCAmelCase : Optional[int] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(UpperCamelCase_ )
lowerCAmelCase : Tuple = -1
lowerCAmelCase : Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCamelCase_ )
lowerCAmelCase : List[Any] = model.generate(UpperCamelCase_ , max_new_tokens=1_0 , do_sample=UpperCamelCase_ )
lowerCAmelCase : Any = greedy_ids[:, input_ids.shape[1] :]
lowerCAmelCase : Optional[int] = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
lowerCAmelCase : Tuple = TextStreamer(UpperCamelCase_ , skip_prompt=UpperCamelCase_ )
model.generate(UpperCamelCase_ , max_new_tokens=1_0 , do_sample=UpperCamelCase_ , streamer=UpperCamelCase_ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowerCAmelCase : str = cs.out[:-1]
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase__ ( self : List[Any] ):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained('''distilgpt2''' )
lowerCAmelCase : int = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = -1
lowerCAmelCase : Tuple = torch.ones((1, 5) , device=UpperCamelCase_ ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
lowerCAmelCase : Any = TextStreamer(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
model.generate(UpperCamelCase_ , max_new_tokens=1 , do_sample=UpperCamelCase_ , streamer=UpperCamelCase_ )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
lowerCAmelCase : Any = cs.out[:-1] # Remove the final "\n"
lowerCAmelCase : Tuple = tokenizer(UpperCamelCase_ , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCAmelCase : Optional[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(UpperCamelCase_ )
lowerCAmelCase : str = -1
lowerCAmelCase : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCamelCase_ )
lowerCAmelCase : Optional[int] = TextIteratorStreamer(UpperCamelCase_ , timeout=0.001 )
lowerCAmelCase : str = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer}
lowerCAmelCase : Optional[int] = Thread(target=model.generate , kwargs=UpperCamelCase_ )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(UpperCamelCase_ ):
lowerCAmelCase : List[str] = ''''''
for new_text in streamer:
streamer_text += new_text
| 314
| 0
|
"""simple docstring"""
import heapq
def greedy_min_vertex_cover(graph : dict ) -> set:
    queue : list[list] = []
    # for each node and its adjacency list, add them and the rank of the node to the queue
    # using the heapq module the queue is filled like a priority queue
    # heapq implements a min priority queue, so -1 * len(v) turns it into a max queue
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue , [-1 * len(value ), (key, value)] )
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # while the queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract the vertex with max rank from the queue and add it to chosen_vertices
        argmax = heapq.heappop(queue )[1][0]
        chosen_vertices.add(argmax )
        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v has no adjacent nodes, skip it
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem,
            # remove argmax from elem's adjacency list and update its rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax )
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue )
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
snake_case__ : List[str] = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
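# Hedged check: with the graph above the greedy order of picks is 2, 0, 1, 4, so the
# printed cover is {0, 1, 2, 4}; every edge of the graph touches at least one of them.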
| 356
|
"""simple docstring"""
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
snake_case__ : Optional[Any] = False
class snake_case_( unittest.TestCase ):
    def get_model_optimizer( self , resolution=3_2 ):
        set_seed(0 )
        model = UNetaDModel(sample_size=resolution , in_channels=3 , out_channels=3 )
        optimizer = torch.optim.SGD(model.parameters() , lr=0.0_001 )
        return model, optimizer
@slow
    def test_training_step_equality( self ):
        device = '''cpu'''  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1_0_0_0 , beta_start=0.0_001 , beta_end=0.02 , beta_schedule='''linear''' , clip_sample=True , )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1_0_0_0 , beta_start=0.0_001 , beta_end=0.02 , beta_schedule='''linear''' , clip_sample=True , )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
        # shared batches for DDPM and DDIM
        set_seed(0 )
        clean_images = [torch.randn((4, 3, 3_2, 3_2) ).clip(-1 , 1 ).to(device ) for _ in range(4 )]
        noise = [torch.randn((4, 3, 3_2, 3_2) ).to(device ) for _ in range(4 )]
        timesteps = [torch.randint(0 , 1_0_0_0 , (4,) ).long().to(device ) for _ in range(4 )]
        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=3_2 )
        model.train().to(device )
        for i in range(4 ):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
            ddpm_noise_pred = model(ddpm_noisy_images , timesteps[i] ).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred , noise[i] )
            loss.backward()
            optimizer.step()
        del model, optimizer
        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=3_2 )
        model.train().to(device )
        for i in range(4 ):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
            ddim_noise_pred = model(ddim_noisy_images , timesteps[i] ).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred , noise[i] )
            loss.backward()
            optimizer.step()
        del model, optimizer
        self.assertTrue(torch.allclose(ddpm_noisy_images , ddim_noisy_images , atol=1E-5 ) )
        self.assertTrue(torch.allclose(ddpm_noise_pred , ddim_noise_pred , atol=1E-5 ) )
| 314
| 0
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
snake_case__ : Any = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
snake_case__ : Dict = {
"vocab_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"
),
"google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt",
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"
),
"google/electra-base-generator": (
"https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"
),
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"
),
},
}
snake_case__ : Optional[int] = {
"google/electra-small-generator": 512,
"google/electra-base-generator": 512,
"google/electra-large-generator": 512,
"google/electra-small-discriminator": 512,
"google/electra-base-discriminator": 512,
"google/electra-large-discriminator": 512,
}
snake_case__ : Union[str, Any] = {
"google/electra-small-generator": {"do_lower_case": True},
"google/electra-base-generator": {"do_lower_case": True},
"google/electra-large-generator": {"do_lower_case": True},
"google/electra-small-discriminator": {"do_lower_case": True},
"google/electra-base-discriminator": {"do_lower_case": True},
"google/electra-large-discriminator": {"do_lower_case": True},
}
class snake_case_( a__ ):
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_INIT_CONFIGURATION
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = ElectraTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('''lowercase''' , do_lower_case ) != do_lower_case
            or normalizer_state.get('''strip_accents''' , strip_accents ) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('''type''' ) )
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
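# Hedged usage sketch (checkpoint taken from the pretrained maps above; `snake_case_`
# is the obfuscated class name, upstream ElectraTokenizerFast):
#
#     tok = snake_case_.from_pretrained("google/electra-small-discriminator")
#     enc = tok("first segment", "second segment")
#     print(enc["token_type_ids"])  # 0s for segment one (+ specials), then 1s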
| 357
|
"""simple docstring"""
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
snake_case__ : List[str] = logging.get_logger(__name__)
class snake_case_( a__ ):
__UpperCamelCase = CLIPConfig
__UpperCamelCase = ['''CLIPEncoderLayer''']
def __init__( self : List[Any] , UpperCamelCase_ : CLIPConfig ):
super().__init__(UpperCamelCase_ )
lowerCAmelCase : str = CLIPVisionModelWithProjection(config.vision_config )
lowerCAmelCase : Any = nn.Linear(config.vision_config.projection_dim , 1 )
lowerCAmelCase : Dict = nn.Linear(config.vision_config.projection_dim , 1 )
@torch.no_grad()
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : int , UpperCamelCase_ : Any , UpperCamelCase_ : Dict=0.5 , UpperCamelCase_ : List[str]=0.5 ):
lowerCAmelCase : List[Any] = self.vision_model(UpperCamelCase_ )[0]
lowerCAmelCase : Tuple = self.p_head(UpperCamelCase_ )
lowerCAmelCase : Any = nsfw_detected.flatten()
lowerCAmelCase : Dict = nsfw_detected > p_threshold
lowerCAmelCase : int = nsfw_detected.tolist()
if any(UpperCamelCase_ ):
logger.warning(
'''Potential NSFW content was detected in one or more images. A black image will be returned instead.'''
''' Try again with a different prompt and/or seed.''' )
for idx, nsfw_detected_ in enumerate(UpperCamelCase_ ):
if nsfw_detected_:
lowerCAmelCase : List[Any] = np.zeros(images[idx].shape )
lowerCAmelCase : Union[str, Any] = self.w_head(UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = watermark_detected.flatten()
lowerCAmelCase : Optional[int] = watermark_detected > w_threshold
lowerCAmelCase : Union[str, Any] = watermark_detected.tolist()
if any(UpperCamelCase_ ):
logger.warning(
'''Potential watermarked content was detected in one or more images. A black image will be returned instead.'''
''' Try again with a different prompt and/or seed.''' )
for idx, watermark_detected_ in enumerate(UpperCamelCase_ ):
if watermark_detected_:
lowerCAmelCase : List[str] = np.zeros(images[idx].shape )
return images, nsfw_detected, watermark_detected
| 314
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case__ : List[str] = logging.get_logger(__name__)
snake_case__ : Dict = {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-openqa': (
        'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class snake_case_( lowerCamelCase__ ):
__UpperCamelCase = '''realm'''
def __init__( self : str , UpperCamelCase_ : Union[str, Any]=3_0_5_2_2 , UpperCamelCase_ : Union[str, Any]=7_6_8 , UpperCamelCase_ : int=1_2_8 , UpperCamelCase_ : Union[str, Any]=1_2 , UpperCamelCase_ : List[Any]=1_2 , UpperCamelCase_ : Optional[Any]=8 , UpperCamelCase_ : Union[str, Any]=3_0_7_2 , UpperCamelCase_ : Optional[int]="gelu_new" , UpperCamelCase_ : Tuple=0.1 , UpperCamelCase_ : Union[str, Any]=0.1 , UpperCamelCase_ : Optional[int]=5_1_2 , UpperCamelCase_ : Any=2 , UpperCamelCase_ : int=0.02 , UpperCamelCase_ : List[str]=1E-12 , UpperCamelCase_ : Dict=2_5_6 , UpperCamelCase_ : int=1_0 , UpperCamelCase_ : List[str]=1E-3 , UpperCamelCase_ : Dict=5 , UpperCamelCase_ : int=3_2_0 , UpperCamelCase_ : Tuple=1_3_3_5_3_7_1_8 , UpperCamelCase_ : Optional[Any]=5_0_0_0 , UpperCamelCase_ : str=1 , UpperCamelCase_ : List[Any]=0 , UpperCamelCase_ : Union[str, Any]=2 , **UpperCamelCase_ : Dict , ):
        super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
# Common config
lowerCAmelCase : Dict = vocab_size
lowerCAmelCase : List[str] = max_position_embeddings
lowerCAmelCase : Optional[int] = hidden_size
lowerCAmelCase : List[Any] = retriever_proj_size
lowerCAmelCase : Tuple = num_hidden_layers
lowerCAmelCase : int = num_attention_heads
lowerCAmelCase : Union[str, Any] = num_candidates
lowerCAmelCase : str = intermediate_size
lowerCAmelCase : Optional[int] = hidden_act
lowerCAmelCase : str = hidden_dropout_prob
lowerCAmelCase : int = attention_probs_dropout_prob
lowerCAmelCase : Union[str, Any] = initializer_range
lowerCAmelCase : Optional[Any] = type_vocab_size
lowerCAmelCase : Union[str, Any] = layer_norm_eps
# Reader config
lowerCAmelCase : int = span_hidden_size
lowerCAmelCase : Optional[int] = max_span_width
lowerCAmelCase : Optional[Any] = reader_layer_norm_eps
lowerCAmelCase : Tuple = reader_beam_size
lowerCAmelCase : Optional[int] = reader_seq_len
# Retrieval config
lowerCAmelCase : List[str] = num_block_records
lowerCAmelCase : Union[str, Any] = searcher_beam_size
| 358
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
snake_case__ : str = logging.get_logger(__name__)
snake_case__ : List[str] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
snake_case__ : str = {
'''vocab_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/vocab.txt''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/vocab.txt''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'''
),
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'''
),
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt''',
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'''
),
'''bert-base-multilingual-cased''': (
'''https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-cased''': (
'''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'''
),
},
}
snake_case__ : Union[str, Any] = {
'''bert-base-uncased''': 512,
'''bert-large-uncased''': 512,
'''bert-base-cased''': 512,
'''bert-large-cased''': 512,
'''bert-base-multilingual-uncased''': 512,
'''bert-base-multilingual-cased''': 512,
'''bert-base-chinese''': 512,
'''bert-base-german-cased''': 512,
'''bert-large-uncased-whole-word-masking''': 512,
'''bert-large-cased-whole-word-masking''': 512,
'''bert-large-uncased-whole-word-masking-finetuned-squad''': 512,
'''bert-large-cased-whole-word-masking-finetuned-squad''': 512,
'''bert-base-cased-finetuned-mrpc''': 512,
'''bert-base-german-dbmdz-cased''': 512,
'''bert-base-german-dbmdz-uncased''': 512,
'''TurkuNLP/bert-base-finnish-cased-v1''': 512,
'''TurkuNLP/bert-base-finnish-uncased-v1''': 512,
'''wietsedv/bert-base-dutch-cased''': 512,
}
snake_case__ : Optional[Any] = {
'''bert-base-uncased''': {'''do_lower_case''': True},
'''bert-large-uncased''': {'''do_lower_case''': True},
'''bert-base-cased''': {'''do_lower_case''': False},
'''bert-large-cased''': {'''do_lower_case''': False},
'''bert-base-multilingual-uncased''': {'''do_lower_case''': True},
'''bert-base-multilingual-cased''': {'''do_lower_case''': False},
'''bert-base-chinese''': {'''do_lower_case''': False},
'''bert-base-german-cased''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': False},
'''bert-base-cased-finetuned-mrpc''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-cased''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-uncased''': {'''do_lower_case''': True},
'''TurkuNLP/bert-base-finnish-cased-v1''': {'''do_lower_case''': False},
'''TurkuNLP/bert-base-finnish-uncased-v1''': {'''do_lower_case''': True},
'''wietsedv/bert-base-dutch-cased''': {'''do_lower_case''': False},
}
class snake_case_( a__ ):
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_INIT_CONFIGURATION
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = BertTokenizer
def __init__( self : int , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : str=True , UpperCamelCase_ : Dict="[UNK]" , UpperCamelCase_ : Any="[SEP]" , UpperCamelCase_ : Any="[PAD]" , UpperCamelCase_ : Tuple="[CLS]" , UpperCamelCase_ : List[Any]="[MASK]" , UpperCamelCase_ : Optional[Any]=True , UpperCamelCase_ : Tuple=None , **UpperCamelCase_ : Optional[int] , ):
super().__init__(
UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , do_lower_case=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , tokenize_chinese_chars=UpperCamelCase_ , strip_accents=UpperCamelCase_ , **UpperCamelCase_ , )
lowerCAmelCase : Any = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , UpperCamelCase_ ) != do_lower_case
or normalizer_state.get('''strip_accents''' , UpperCamelCase_ ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , UpperCamelCase_ ) != tokenize_chinese_chars
):
lowerCAmelCase : Optional[int] = getattr(UpperCamelCase_ , normalizer_state.pop('''type''' ) )
lowerCAmelCase : Tuple = do_lower_case
lowerCAmelCase : Union[str, Any] = strip_accents
lowerCAmelCase : Tuple = tokenize_chinese_chars
lowerCAmelCase : str = normalizer_class(**UpperCamelCase_ )
lowerCAmelCase : Optional[int] = do_lower_case
def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple=None ):
lowerCAmelCase : List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ):
lowerCAmelCase : Optional[Any] = [self.sep_token_id]
lowerCAmelCase : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ):
lowerCAmelCase : str = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_ )
return tuple(UpperCamelCase_ )
| 314
| 0
|
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class snake_case_( unittest.TestCase ):
__UpperCamelCase = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
__UpperCamelCase = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : int , UpperCamelCase_ : List[Any] ):
lowerCAmelCase : str = TextaTextGenerationPipeline(model=A__ , tokenizer=A__ )
return generator, ["Something to write", "Something else"]
def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[Any] ):
lowerCAmelCase : Optional[Any] = generator('''Something there''' )
self.assertEqual(A__ , [{'''generated_text''': ANY(A__ )}] )
# These are encoder decoder, they don't just append to incoming string
self.assertFalse(outputs[0]['''generated_text'''].startswith('''Something there''' ) )
lowerCAmelCase : Optional[int] = generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=A__ )
self.assertEqual(
A__ , [
[{'''generated_text''': ANY(A__ )}, {'''generated_text''': ANY(A__ )}],
[{'''generated_text''': ANY(A__ )}, {'''generated_text''': ANY(A__ )}],
] , )
lowerCAmelCase : List[Any] = generator(
['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=A__ )
self.assertEqual(
A__ , [
[{'''generated_text''': ANY(A__ )}, {'''generated_text''': ANY(A__ )}],
[{'''generated_text''': ANY(A__ )}, {'''generated_text''': ANY(A__ )}],
] , )
with self.assertRaises(A__ ):
generator(4 )
@require_torch
def lowerCamelCase__ ( self : List[str] ):
lowerCAmelCase : Optional[Any] = pipeline('''text2text-generation''' , model='''patrickvonplaten/t5-tiny-random''' , framework='''pt''' )
# do_sample=False necessary for reproducibility
lowerCAmelCase : List[Any] = generator('''Something there''' , do_sample=A__ )
self.assertEqual(A__ , [{'''generated_text''': ''''''}] )
lowerCAmelCase : Optional[Any] = 3
lowerCAmelCase : Tuple = generator(
'''Something there''' , num_return_sequences=A__ , num_beams=A__ , )
lowerCAmelCase : List[Any] = [
{'''generated_text''': '''Beide Beide Beide Beide Beide Beide Beide Beide Beide'''},
{'''generated_text''': '''Beide Beide Beide Beide Beide Beide Beide Beide'''},
{'''generated_text''': ''''''},
]
self.assertEqual(A__ , A__ )
lowerCAmelCase : Optional[int] = generator('''This is a test''' , do_sample=A__ , num_return_sequences=2 , return_tensors=A__ )
self.assertEqual(
A__ , [
{'''generated_token_ids''': ANY(torch.Tensor )},
{'''generated_token_ids''': ANY(torch.Tensor )},
] , )
lowerCAmelCase : Tuple = generator.model.config.eos_token_id
lowerCAmelCase : Tuple = '''<pad>'''
lowerCAmelCase : Dict = generator(
['''This is a test''', '''This is a second test'''] , do_sample=A__ , num_return_sequences=2 , batch_size=2 , return_tensors=A__ , )
self.assertEqual(
A__ , [
[
{'''generated_token_ids''': ANY(torch.Tensor )},
{'''generated_token_ids''': ANY(torch.Tensor )},
],
[
{'''generated_token_ids''': ANY(torch.Tensor )},
{'''generated_token_ids''': ANY(torch.Tensor )},
],
] , )
@require_tf
def lowerCamelCase__ ( self : List[str] ):
lowerCAmelCase : Any = pipeline('''text2text-generation''' , model='''patrickvonplaten/t5-tiny-random''' , framework='''tf''' )
# do_sample=False necessary for reproducibility
lowerCAmelCase : Dict = generator('''Something there''' , do_sample=A__ )
self.assertEqual(A__ , [{'''generated_text''': ''''''}] )
| 359
|
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class snake_case_( a__ ):
__UpperCamelCase = (DDPMScheduler,)
def lowerCamelCase__ ( self : List[Any] , **UpperCamelCase_ : Union[str, Any] ):
lowerCAmelCase : Optional[Any] = {
'''num_train_timesteps''': 1_0_0_0,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**UpperCamelCase_ )
return config
def lowerCamelCase__ ( self : Optional[int] ):
for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=UpperCamelCase_ )
def lowerCamelCase__ ( self : Tuple ):
for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=UpperCamelCase_ , beta_end=UpperCamelCase_ )
def lowerCamelCase__ ( self : str ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=UpperCamelCase_ )
def lowerCamelCase__ ( self : Optional[Any] ):
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=UpperCamelCase_ )
def lowerCamelCase__ ( self : Optional[int] ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=UpperCamelCase_ )
def lowerCamelCase__ ( self : Any ):
self.check_over_configs(thresholding=UpperCamelCase_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=UpperCamelCase_ , prediction_type=UpperCamelCase_ , sample_max_value=UpperCamelCase_ , )
def lowerCamelCase__ ( self : Tuple ):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCamelCase_ )
def lowerCamelCase__ ( self : str ):
for t in [0, 5_0_0, 9_9_9]:
self.check_over_forward(time_step=UpperCamelCase_ )
def lowerCamelCase__ ( self : int ):
lowerCAmelCase : str = self.scheduler_classes[0]
lowerCAmelCase : Dict = self.get_scheduler_config()
lowerCAmelCase : Dict = scheduler_class(**UpperCamelCase_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.00_979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.02 ) ) < 1E-5
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase : List[Any] = self.scheduler_classes[0]
lowerCAmelCase : List[Any] = self.get_scheduler_config()
lowerCAmelCase : List[str] = scheduler_class(**UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = len(UpperCamelCase_ )
lowerCAmelCase : List[str] = self.dummy_model()
lowerCAmelCase : Union[str, Any] = self.dummy_sample_deter
lowerCAmelCase : List[Any] = torch.manual_seed(0 )
for t in reversed(range(UpperCamelCase_ ) ):
# 1. predict noise residual
lowerCAmelCase : Optional[int] = model(UpperCamelCase_ , UpperCamelCase_ )
# 2. predict previous mean of sample x_t-1
lowerCAmelCase : Optional[Any] = scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCAmelCase : Union[str, Any] = pred_prev_sample
lowerCAmelCase : str = torch.sum(torch.abs(UpperCamelCase_ ) )
lowerCAmelCase : int = torch.mean(torch.abs(UpperCamelCase_ ) )
assert abs(result_sum.item() - 258.9_606 ) < 1E-2
assert abs(result_mean.item() - 0.3_372 ) < 1E-3
def lowerCamelCase__ ( self : Any ):
lowerCAmelCase : Optional[int] = self.scheduler_classes[0]
lowerCAmelCase : Any = self.get_scheduler_config(prediction_type='''v_prediction''' )
lowerCAmelCase : Tuple = scheduler_class(**UpperCamelCase_ )
lowerCAmelCase : Dict = len(UpperCamelCase_ )
lowerCAmelCase : Any = self.dummy_model()
lowerCAmelCase : Any = self.dummy_sample_deter
lowerCAmelCase : List[Any] = torch.manual_seed(0 )
for t in reversed(range(UpperCamelCase_ ) ):
# 1. predict noise residual
lowerCAmelCase : str = model(UpperCamelCase_ , UpperCamelCase_ )
# 2. predict previous mean of sample x_t-1
lowerCAmelCase : List[Any] = scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCAmelCase : List[Any] = pred_prev_sample
lowerCAmelCase : List[str] = torch.sum(torch.abs(UpperCamelCase_ ) )
lowerCAmelCase : Dict = torch.mean(torch.abs(UpperCamelCase_ ) )
assert abs(result_sum.item() - 202.0_296 ) < 1E-2
assert abs(result_mean.item() - 0.2_631 ) < 1E-3
def lowerCamelCase__ ( self : Any ):
lowerCAmelCase : Dict = self.scheduler_classes[0]
lowerCAmelCase : Tuple = self.get_scheduler_config()
lowerCAmelCase : int = scheduler_class(**UpperCamelCase_ )
lowerCAmelCase : List[Any] = [1_0_0, 8_7, 5_0, 1, 0]
scheduler.set_timesteps(timesteps=UpperCamelCase_ )
lowerCAmelCase : Dict = scheduler.timesteps
for i, timestep in enumerate(UpperCamelCase_ ):
if i == len(UpperCamelCase_ ) - 1:
lowerCAmelCase : List[Any] = -1
else:
lowerCAmelCase : Union[str, Any] = timesteps[i + 1]
lowerCAmelCase : Any = scheduler.previous_timestep(UpperCamelCase_ )
lowerCAmelCase : Dict = prev_t.item()
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : Union[str, Any] = self.scheduler_classes[0]
lowerCAmelCase : List[Any] = self.get_scheduler_config()
lowerCAmelCase : Tuple = scheduler_class(**UpperCamelCase_ )
lowerCAmelCase : int = [1_0_0, 8_7, 5_0, 5_1, 0]
with self.assertRaises(UpperCamelCase_ , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=UpperCamelCase_ )
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase : Any = self.scheduler_classes[0]
lowerCAmelCase : Optional[int] = self.get_scheduler_config()
lowerCAmelCase : str = scheduler_class(**UpperCamelCase_ )
lowerCAmelCase : List[str] = [1_0_0, 8_7, 5_0, 1, 0]
lowerCAmelCase : int = len(UpperCamelCase_ )
with self.assertRaises(UpperCamelCase_ , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=UpperCamelCase_ , timesteps=UpperCamelCase_ )
def lowerCamelCase__ ( self : Optional[Any] ):
lowerCAmelCase : List[Any] = self.scheduler_classes[0]
lowerCAmelCase : Tuple = self.get_scheduler_config()
lowerCAmelCase : Dict = scheduler_class(**UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            UpperCamelCase_ , msg=f'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
scheduler.set_timesteps(timesteps=UpperCamelCase_ )
| 314
| 0
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class snake_case_( __a , unittest.TestCase ):
__UpperCamelCase = BlenderbotSmallTokenizer
__UpperCamelCase = False
def lowerCamelCase__ ( self : Optional[int] ):
super().setUp()
lowerCAmelCase : Union[str, Any] = ['''__start__''', '''adapt''', '''act''', '''ap@@''', '''te''', '''__end__''', '''__unk__''']
lowerCAmelCase : List[str] = dict(zip(a__ , range(len(a__ ) ) ) )
lowerCAmelCase : Optional[Any] = ['''#version: 0.2''', '''a p''', '''t e</w>''', '''ap t</w>''', '''a d''', '''ad apt</w>''', '''a c''', '''ac t</w>''', '''''']
lowerCAmelCase : Optional[Any] = {'''unk_token''': '''__unk__''', '''bos_token''': '''__start__''', '''eos_token''': '''__end__'''}
lowerCAmelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCAmelCase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(a__ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(a__ ) )
def lowerCamelCase__ ( self : Optional[int] , **UpperCamelCase_ : Union[str, Any] ):
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **a__ )
def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : List[Any] ):
lowerCAmelCase : int = '''adapt act apte'''
lowerCAmelCase : Any = '''adapt act apte'''
return input_text, output_text
def lowerCamelCase__ ( self : List[str] ):
lowerCAmelCase : List[str] = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowerCAmelCase : Union[str, Any] = '''adapt act apte'''
lowerCAmelCase : Dict = ['''adapt''', '''act''', '''ap@@''', '''te''']
lowerCAmelCase : Union[str, Any] = tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
lowerCAmelCase : List[str] = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
lowerCAmelCase : Dict = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ )
def lowerCamelCase__ ( self : int ):
lowerCAmelCase : Optional[Any] = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
assert tok('''sam''' ).input_ids == [1_3_8_4]
lowerCAmelCase : List[Any] = '''I am a small frog.'''
lowerCAmelCase : str = tok([src_text] , padding=a__ , truncation=a__ )['''input_ids''']
lowerCAmelCase : Dict = tok.batch_decode(a__ , skip_special_tokens=a__ , clean_up_tokenization_spaces=a__ )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def lowerCamelCase__ ( self : List[str] ):
lowerCAmelCase : Optional[Any] = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
lowerCAmelCase : Union[str, Any] = '''I am a small frog .'''
lowerCAmelCase : Dict = '''.'''
lowerCAmelCase : str = tok(a__ )['''input_ids''']
lowerCAmelCase : Any = tok(a__ )['''input_ids''']
assert encoded[-1] == encoded_dot[0]
| 360
|
"""simple docstring"""
def _snake_case ( _snake_case : int = 50000000 ):
lowerCAmelCase : List[str] = set()
lowerCAmelCase : List[Any] = int((limit - 24) ** (1 / 2) )
lowerCAmelCase : Optional[int] = set(range(3 , prime_square_limit + 1 , 2 ) )
primes.add(2 )
for p in range(3 , prime_square_limit + 1 , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , prime_square_limit + 1 , _snake_case ) ) )
for primea in primes:
lowerCAmelCase : Optional[Any] = primea * primea
for primea in primes:
lowerCAmelCase : List[Any] = primea * primea * primea
if square + cube >= limit - 16:
break
for primea in primes:
lowerCAmelCase : Tuple = primea * primea * primea * primea
lowerCAmelCase : Tuple = square + cube + tetr
if total >= limit:
break
ret.add(_snake_case )
return len(_snake_case )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 314
| 0
|
"""simple docstring"""
def _snake_case ( _snake_case : list[int] , _snake_case : list[int] ):
lowerCAmelCase : Optional[int] = len(_snake_case )
print('''The following activities are selected:''' )
# The first activity is always selected
lowerCAmelCase : Dict = 0
print(_snake_case , end=''',''' )
# Consider rest of the activities
for j in range(_snake_case ):
# If this activity has start time greater than
# or equal to the finish time of previously
# selected activity, then select it
if start[j] >= finish[i]:
print(_snake_case , end=''',''' )
lowerCAmelCase : str = j
if __name__ == "__main__":
import doctest
doctest.testmod()
snake_case__ : Optional[int] = [1, 3, 0, 5, 8, 5]
snake_case__ : Any = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
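# Tracing the greedy loop above on this sample (finish times are already
# sorted ascending, which the algorithm assumes), the expected console output
# is: "The following activities are selected:" followed by 0,1,3,4,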
| 361
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
snake_case__ : Tuple = {
'''configuration_maskformer''': ['''MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MaskFormerConfig'''],
'''configuration_maskformer_swin''': ['''MaskFormerSwinConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : List[Any] = ['''MaskFormerFeatureExtractor''']
snake_case__ : List[Any] = ['''MaskFormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : Dict = [
'''MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MaskFormerForInstanceSegmentation''',
'''MaskFormerModel''',
'''MaskFormerPreTrainedModel''',
]
snake_case__ : Optional[Any] = [
'''MaskFormerSwinBackbone''',
'''MaskFormerSwinModel''',
'''MaskFormerSwinPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
snake_case__ : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 314
| 0
|
import numpy as np
def _snake_case ( _snake_case : Union[str, Any] ):
return 1 / (1 + np.exp(-vector ))
if __name__ == "__main__":
import doctest
doctest.testmod()
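# Minimal usage sketch (hedged; the obfuscated function above is a plain
# logistic sigmoid, 1 / (1 + e**-x)):
# _snake_case(np.array([-1.0, 0.0, 1.0]))  # ~ array([0.2689, 0.5, 0.7311])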
| 362
|
"""simple docstring"""
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class snake_case_:
def __init__( self : Dict , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : int=sys.maxsize ):
lowerCAmelCase : Tuple = '''bilinear'''
lowerCAmelCase : List[Any] = max_size
lowerCAmelCase : Optional[int] = short_edge_length
def __call__( self : Optional[int] , UpperCamelCase_ : Optional[int] ):
lowerCAmelCase : Tuple = []
for img in imgs:
lowerCAmelCase, lowerCAmelCase : List[str] = img.shape[:2]
# later: provide list and randomly choose index for resize
lowerCAmelCase : int = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
if size == 0:
return img
lowerCAmelCase : Optional[Any] = size * 1.0 / min(UpperCamelCase_ , UpperCamelCase_ )
if h < w:
lowerCAmelCase, lowerCAmelCase : List[str] = size, scale * w
else:
lowerCAmelCase, lowerCAmelCase : int = scale * h, size
if max(UpperCamelCase_ , UpperCamelCase_ ) > self.max_size:
lowerCAmelCase : Union[str, Any] = self.max_size * 1.0 / max(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : Tuple = newh * scale
lowerCAmelCase : str = neww * scale
lowerCAmelCase : Union[str, Any] = int(neww + 0.5 )
lowerCAmelCase : str = int(newh + 0.5 )
if img.dtype == np.uinta:
lowerCAmelCase : Tuple = Image.fromarray(UpperCamelCase_ )
lowerCAmelCase : Optional[int] = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
lowerCAmelCase : Union[str, Any] = np.asarray(UpperCamelCase_ )
else:
lowerCAmelCase : List[str] = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw
lowerCAmelCase : Optional[int] = nn.functional.interpolate(
UpperCamelCase_ , (newh, neww) , mode=self.interp_method , align_corners=UpperCamelCase_ ).squeeze(0 )
img_augs.append(UpperCamelCase_ )
return img_augs
class snake_case_:
def __init__( self : Tuple , UpperCamelCase_ : Any ):
lowerCAmelCase : Any = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
lowerCAmelCase : List[Any] = cfg.INPUT.FORMAT
lowerCAmelCase : Tuple = cfg.SIZE_DIVISIBILITY
lowerCAmelCase : int = cfg.PAD_VALUE
lowerCAmelCase : Union[str, Any] = cfg.INPUT.MAX_SIZE_TEST
lowerCAmelCase : Union[str, Any] = cfg.MODEL.DEVICE
lowerCAmelCase : Union[str, Any] = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
lowerCAmelCase : List[Any] = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
lowerCAmelCase : Optional[int] = lambda UpperCamelCase_ : (x - self.pixel_mean) / self.pixel_std
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : List[Any] ):
lowerCAmelCase : Dict = tuple(max(UpperCamelCase_ ) for s in zip(*[img.shape for img in images] ) )
lowerCAmelCase : Dict = [im.shape[-2:] for im in images]
lowerCAmelCase : Dict = [
nn.functional.pad(
UpperCamelCase_ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(UpperCamelCase_ , UpperCamelCase_ )
]
return torch.stack(UpperCamelCase_ ), torch.tensor(UpperCamelCase_ )
def __call__( self : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[int]=False ):
with torch.no_grad():
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
lowerCAmelCase : List[Any] = [images]
if single_image:
assert len(UpperCamelCase_ ) == 1
for i in range(len(UpperCamelCase_ ) ):
if isinstance(images[i] , torch.Tensor ):
images.insert(UpperCamelCase_ , images.pop(UpperCamelCase_ ).to(self.device ).float() )
elif not isinstance(images[i] , torch.Tensor ):
images.insert(
UpperCamelCase_ , torch.as_tensor(img_tensorize(images.pop(UpperCamelCase_ ) , input_format=self.input_format ) )
.to(self.device )
.float() , )
# resize smallest edge
lowerCAmelCase : Dict = torch.tensor([im.shape[:2] for im in images] )
lowerCAmelCase : str = self.aug(UpperCamelCase_ )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
lowerCAmelCase : int = [self.normalizer(UpperCamelCase_ ) for x in images]
# now pad them to do the following operations
lowerCAmelCase, lowerCAmelCase : Optional[Any] = self.pad(UpperCamelCase_ )
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
lowerCAmelCase : Union[str, Any] = torch.true_divide(UpperCamelCase_ , UpperCamelCase_ )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def _snake_case ( _snake_case : str , _snake_case : List[Any] ):
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def _snake_case ( _snake_case : Any , _snake_case : Tuple[int, int] ):
assert torch.isfinite(_snake_case ).all(), "Box tensor contains infinite or NaN!"
lowerCAmelCase, lowerCAmelCase : Optional[int] = box_size
tensor[:, 0].clamp_(min=0 , max=_snake_case )
tensor[:, 1].clamp_(min=0 , max=_snake_case )
tensor[:, 2].clamp_(min=0 , max=_snake_case )
tensor[:, 3].clamp_(min=0 , max=_snake_case )
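# The two module-level helpers above are post-processing utilities: the first
# rescales predicted boxes back to the original image resolution, and the
# second clamps (x1, y1, x2, y2) coordinates in place to the image extent
# given by `box_size`.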
| 314
| 0
|
"""simple docstring"""
from collections.abc import Generator
from math import sin
def _snake_case ( _snake_case : Tuple ):
if len(UpperCAmelCase__ ) != 32:
raise ValueError('''Input must be of length 32''' )
lowerCAmelCase : str = B''''''
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def _snake_case ( _snake_case : Any ):
if i < 0:
raise ValueError('''Input must be non-negative''' )
lowerCAmelCase : str = format(UpperCAmelCase__ , '''08x''' )[-8:]
lowerCAmelCase : Optional[Any] = B''''''
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('''utf-8''' )
return little_endian_hex
def _snake_case ( _snake_case : Dict ):
lowerCAmelCase : List[Any] = B''''''
for char in message:
bit_string += format(UpperCAmelCase__ , '''08b''' ).encode('''utf-8''' )
lowerCAmelCase : str = format(len(UpperCAmelCase__ ) , '''064b''' ).encode('''utf-8''' )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(UpperCAmelCase__ ) % 512 != 448:
bit_string += b"0"
bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
return bit_string
def _snake_case ( _snake_case : Dict ):
if len(UpperCAmelCase__ ) % 512 != 0:
raise ValueError('''Input must have length that\'s a multiple of 512''' )
for pos in range(0 , len(UpperCAmelCase__ ) , 512 ):
lowerCAmelCase : List[str] = bit_string[pos : pos + 512]
lowerCAmelCase : Optional[Any] = []
for i in range(0 , 512 , 32 ):
block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
yield block_words
def _snake_case ( _snake_case : Dict ):
if i < 0:
raise ValueError('''Input must be non-negative''' )
lowerCAmelCase : int = format(UpperCAmelCase__ , '''032b''' )
lowerCAmelCase : List[str] = ''''''
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(UpperCAmelCase__ , 2 )
def _snake_case ( _snake_case : str , _snake_case : Union[str, Any] ):
return (a + b) % 2**32
def _snake_case ( _snake_case : List[str] , _snake_case : int ):
if i < 0:
raise ValueError('''Input must be non-negative''' )
if shift < 0:
raise ValueError('''Shift must be non-negative''' )
return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def _snake_case ( _snake_case : List[str] ):
lowerCAmelCase : Union[str, Any] = preprocess(UpperCAmelCase__ )
lowerCAmelCase : Union[str, Any] = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
# Starting states
lowerCAmelCase : Dict = 0X6745_2301
lowerCAmelCase : List[str] = 0Xefcd_ab89
lowerCAmelCase : Optional[int] = 0X98ba_dcfe
lowerCAmelCase : str = 0X1032_5476
lowerCAmelCase : Tuple = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(UpperCAmelCase__ ):
lowerCAmelCase : Tuple = aa
lowerCAmelCase : List[str] = ba
lowerCAmelCase : List[Any] = ca
lowerCAmelCase : Union[str, Any] = da
# Hash current chunk
for i in range(64 ):
if i <= 15:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
lowerCAmelCase : int = d ^ (b & (c ^ d))
lowerCAmelCase : int = i
elif i <= 31:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
lowerCAmelCase : Optional[int] = c ^ (d & (b ^ c))
lowerCAmelCase : Any = (5 * i + 1) % 16
elif i <= 47:
lowerCAmelCase : Tuple = b ^ c ^ d
lowerCAmelCase : Dict = (3 * i + 5) % 16
else:
lowerCAmelCase : Optional[int] = c ^ (b | not_aa(UpperCAmelCase__ ))
lowerCAmelCase : str = (7 * i) % 16
lowerCAmelCase : Optional[Any] = (f + a + added_consts[i] + block_words[g]) % 2**32
lowerCAmelCase : Union[str, Any] = d
lowerCAmelCase : str = c
lowerCAmelCase : str = b
lowerCAmelCase : List[Any] = sum_aa(UpperCAmelCase__ , left_rotate_aa(UpperCAmelCase__ , shift_amounts[i] ) )
# Add hashed chunk to running total
lowerCAmelCase : Optional[Any] = sum_aa(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase : List[Any] = sum_aa(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase : List[str] = sum_aa(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase : List[Any] = sum_aa(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase : Union[str, Any] = reformat_hex(UpperCAmelCase__ ) + reformat_hex(UpperCAmelCase__ ) + reformat_hex(UpperCAmelCase__ ) + reformat_hex(UpperCAmelCase__ )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
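# Hedged sanity check (assuming the digest routine above keeps its original
# call-site name, e.g. `md5_me`): hashing the empty message should reproduce
# the well-known empty-input digest:
# md5_me(b"") == b"d41d8cd98f00b204e9800998ecf8427e"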
| 363
|
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _snake_case ( _snake_case : Dict ):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0X4e00 and cp <= 0X9fff)
or (cp >= 0X3400 and cp <= 0X4dbf) #
or (cp >= 0X2_0000 and cp <= 0X2_a6df) #
or (cp >= 0X2_a700 and cp <= 0X2_b73f) #
or (cp >= 0X2_b740 and cp <= 0X2_b81f) #
or (cp >= 0X2_b820 and cp <= 0X2_ceaf) #
or (cp >= 0Xf900 and cp <= 0Xfaff)
or (cp >= 0X2_f800 and cp <= 0X2_fa1f) #
): #
return True
return False
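# Hedged example: ord("中") == 0x4E2D lies inside the CJK Unified Ideographs
# block (0x4E00-0x9FFF), so the check above treats it as a Chinese character.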
def _snake_case ( _snake_case : str ):
# word like '180' or '身高' or '神'
for char in word:
lowerCAmelCase : str = ord(_snake_case )
if not _is_chinese_char(_snake_case ):
return 0
return 1
def _snake_case ( _snake_case : List[str] ):
lowerCAmelCase : List[Any] = set()
for token in tokens:
lowerCAmelCase : Union[str, Any] = len(_snake_case ) > 1 and is_chinese(_snake_case )
if chinese_word:
word_set.add(_snake_case )
lowerCAmelCase : List[str] = list(_snake_case )
return word_list
def _snake_case ( _snake_case : List[str] , _snake_case : set() ):
if not chinese_word_set:
return bert_tokens
lowerCAmelCase : List[Any] = max([len(_snake_case ) for w in chinese_word_set] )
lowerCAmelCase : Optional[Any] = bert_tokens
lowerCAmelCase, lowerCAmelCase : Any = 0, len(_snake_case )
while start < end:
lowerCAmelCase : str = True
if is_chinese(bert_word[start] ):
lowerCAmelCase : List[Any] = min(end - start , _snake_case )
for i in range(_snake_case , 1 , -1 ):
lowerCAmelCase : str = ''''''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
lowerCAmelCase : Optional[Any] = '''##''' + bert_word[j]
lowerCAmelCase : Union[str, Any] = start + i
lowerCAmelCase : Optional[Any] = False
break
if single_word:
start += 1
return bert_word
def _snake_case ( _snake_case : List[str] , _snake_case : LTP , _snake_case : BertTokenizer ):
lowerCAmelCase : Optional[int] = []
for i in range(0 , len(_snake_case ) , 100 ):
lowerCAmelCase : Optional[int] = ltp_tokenizer.seg(lines[i : i + 100] )[0]
lowerCAmelCase : Union[str, Any] = [get_chinese_word(_snake_case ) for r in res]
ltp_res.extend(_snake_case )
assert len(_snake_case ) == len(_snake_case )
lowerCAmelCase : int = []
for i in range(0 , len(_snake_case ) , 100 ):
lowerCAmelCase : Optional[Any] = bert_tokenizer(lines[i : i + 100] , add_special_tokens=_snake_case , truncation=_snake_case , max_length=512 )
bert_res.extend(res['''input_ids'''] )
assert len(_snake_case ) == len(_snake_case )
lowerCAmelCase : Union[str, Any] = []
for input_ids, chinese_word in zip(_snake_case , _snake_case ):
lowerCAmelCase : Optional[int] = []
for id in input_ids:
lowerCAmelCase : Union[str, Any] = bert_tokenizer._convert_id_to_token(_snake_case )
input_tokens.append(_snake_case )
lowerCAmelCase : Any = add_sub_symbol(_snake_case , _snake_case )
lowerCAmelCase : Union[str, Any] = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(_snake_case ):
if token[:2] == "##":
lowerCAmelCase : Any = token[2:]
# save chinese tokens' pos
if len(_snake_case ) == 1 and _is_chinese_char(ord(_snake_case ) ):
ref_id.append(_snake_case )
ref_ids.append(_snake_case )
assert len(_snake_case ) == len(_snake_case )
return ref_ids
def _snake_case ( _snake_case : Dict ):
# For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
# If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
with open(args.file_name , '''r''' , encoding='''utf-8''' ) as f:
lowerCAmelCase : List[str] = f.readlines()
lowerCAmelCase : Union[str, Any] = [line.strip() for line in data if len(_snake_case ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
lowerCAmelCase : List[str] = LTP(args.ltp ) # faster in GPU device
lowerCAmelCase : Any = BertTokenizer.from_pretrained(args.bert )
lowerCAmelCase : int = prepare_ref(_snake_case , _snake_case , _snake_case )
with open(args.save_path , '''w''' , encoding='''utf-8''' ) as f:
lowerCAmelCase : List[Any] = [json.dumps(_snake_case ) + '''\n''' for ref in ref_ids]
f.writelines(_snake_case )
if __name__ == "__main__":
snake_case__ : Optional[int] = argparse.ArgumentParser(description='''prepare_chinese_ref''')
parser.add_argument(
'''--file_name''',
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''', type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path'''
)
parser.add_argument('''--bert''', type=str, default='''./resources/robert''', help='''resources for Bert tokenizer''')
parser.add_argument('''--save_path''', type=str, default='''./resources/ref.txt''', help='''path to save res''')
snake_case__ : int = parser.parse_args()
main(args)
| 314
| 0
|
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class snake_case_:
def __init__( self : str , UpperCamelCase_ : List[str] , UpperCamelCase_ : List[str]=1_3 , UpperCamelCase_ : Any=3_2 , UpperCamelCase_ : Optional[Any]=3 , UpperCamelCase_ : Optional[Any]=4 , UpperCamelCase_ : Dict=[1_0, 2_0, 3_0, 4_0] , UpperCamelCase_ : int=[2, 2, 3, 2] , UpperCamelCase_ : str=True , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : List[str]=3_7 , UpperCamelCase_ : Any="gelu" , UpperCamelCase_ : int=1_0 , UpperCamelCase_ : Optional[Any]=0.02 , UpperCamelCase_ : Optional[int]=["stage2", "stage3", "stage4"] , UpperCamelCase_ : Optional[Any]=[2, 3, 4] , UpperCamelCase_ : Any=None , ):
lowerCAmelCase : Dict = parent
lowerCAmelCase : List[Any] = batch_size
lowerCAmelCase : Optional[Any] = image_size
lowerCAmelCase : Optional[Any] = num_channels
lowerCAmelCase : List[str] = num_stages
lowerCAmelCase : Any = hidden_sizes
lowerCAmelCase : Dict = depths
lowerCAmelCase : Optional[Any] = is_training
lowerCAmelCase : Optional[int] = use_labels
lowerCAmelCase : Optional[Any] = intermediate_size
lowerCAmelCase : Tuple = hidden_act
lowerCAmelCase : List[Any] = num_labels
lowerCAmelCase : Any = initializer_range
lowerCAmelCase : str = out_features
lowerCAmelCase : Any = out_indices
lowerCAmelCase : Optional[int] = scope
def lowerCamelCase__ ( self : Optional[int] ):
lowerCAmelCase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase : Optional[Any] = None
if self.use_labels:
lowerCAmelCase : int = ids_tensor([self.batch_size] , self.num_labels )
lowerCAmelCase : int = self.get_config()
return config, pixel_values, labels
def lowerCamelCase__ ( self : Optional[Any] ):
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=_A , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def lowerCamelCase__ ( self : str , UpperCamelCase_ : str , UpperCamelCase_ : List[str] , UpperCamelCase_ : List[str] ):
lowerCAmelCase : List[Any] = ConvNextVaModel(config=_A )
model.to(_A )
model.eval()
lowerCAmelCase : int = model(_A )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Dict ):
lowerCAmelCase : Union[str, Any] = ConvNextVaForImageClassification(_A )
model.to(_A )
model.eval()
lowerCAmelCase : Union[str, Any] = model(_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Tuple ):
lowerCAmelCase : str = ConvNextVaBackbone(config=_A )
model.to(_A )
model.eval()
lowerCAmelCase : Optional[Any] = model(_A )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowerCAmelCase : List[str] = None
lowerCAmelCase : str = ConvNextVaBackbone(config=_A )
model.to(_A )
model.eval()
lowerCAmelCase : Any = model(_A )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def lowerCamelCase__ ( self : Optional[int] ):
lowerCAmelCase : str = self.prepare_config_and_inputs()
lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : Any = config_and_inputs
lowerCAmelCase : Tuple = {'''pixel_values''': pixel_values}
return config, inputs_dict
def lowerCamelCase__ ( self : Any ):
lowerCAmelCase : Optional[int] = self.prepare_config_and_inputs()
lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : List[str] = config_and_inputs
lowerCAmelCase : Optional[Any] = {'''pixel_values''': pixel_values, '''labels''': labels}
return config, inputs_dict
@require_torch
class snake_case_( snake_case__ , snake_case__ , unittest.TestCase ):
__UpperCamelCase = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
__UpperCamelCase = (
{"""feature-extraction""": ConvNextVaModel, """image-classification""": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
def lowerCamelCase__ ( self : Optional[Any] ):
lowerCAmelCase : str = ConvNextVaModelTester(self )
lowerCAmelCase : Tuple = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=3_7 )
def lowerCamelCase__ ( self : Any ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase__ ( self : Union[str, Any] ):
return
@unittest.skip(reason='''ConvNextV2 does not use inputs_embeds''' )
def lowerCamelCase__ ( self : int ):
pass
@unittest.skip(reason='''ConvNextV2 does not support input and output embeddings''' )
def lowerCamelCase__ ( self : Optional[int] ):
pass
@unittest.skip(reason='''ConvNextV2 does not use feedforward chunking''' )
def lowerCamelCase__ ( self : Optional[Any] ):
pass
def lowerCamelCase__ ( self : List[Any] ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
lowerCAmelCase, lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_with_labels()
lowerCAmelCase : List[str] = True
if model_class.__name__ in [
*get_values(_A ),
*get_values(_A ),
]:
continue
lowerCAmelCase : str = model_class(_A )
model.to(_A )
model.train()
lowerCAmelCase : Optional[Any] = self._prepare_for_class(_A , _A , return_labels=_A )
lowerCAmelCase : Tuple = model(**_A ).loss
loss.backward()
def lowerCamelCase__ ( self : Union[str, Any] ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
lowerCAmelCase, lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_with_labels()
lowerCAmelCase : Tuple = False
lowerCAmelCase : int = True
if (
model_class.__name__
in [*get_values(_A ), *get_values(_A )]
or not model_class.supports_gradient_checkpointing
):
continue
lowerCAmelCase : Dict = model_class(_A )
model.to(_A )
model.gradient_checkpointing_enable()
model.train()
lowerCAmelCase : Tuple = self._prepare_for_class(_A , _A , return_labels=_A )
lowerCAmelCase : Tuple = model(**_A ).loss
loss.backward()
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase, lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : List[Any] = model_class(_A )
lowerCAmelCase : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase : List[Any] = [*signature.parameters.keys()]
lowerCAmelCase : Any = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _A )
def lowerCamelCase__ ( self : int ):
lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def lowerCamelCase__ ( self : Optional[int] ):
def check_hidden_states_output(UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Any ):
lowerCAmelCase : Optional[Any] = model_class(_A )
model.to(_A )
model.eval()
with torch.no_grad():
lowerCAmelCase : Tuple = model(**self._prepare_for_class(_A , _A ) )
lowerCAmelCase : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCAmelCase : List[Any] = self.model_tester.num_stages
self.assertEqual(len(_A ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowerCAmelCase, lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : Optional[Any] = True
check_hidden_states_output(_A , _A , _A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase : Optional[int] = True
check_hidden_states_output(_A , _A , _A )
def lowerCamelCase__ ( self : str ):
lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
@slow
def lowerCamelCase__ ( self : Tuple ):
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase : List[str] = ConvNextVaModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def _snake_case ( ):
lowerCAmelCase : List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class snake_case_( unittest.TestCase ):
@cached_property
def lowerCamelCase__ ( self : List[Any] ):
return AutoImageProcessor.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ) if is_vision_available() else None
@slow
def lowerCamelCase__ ( self : Any ):
lowerCAmelCase : int = ConvNextVaForImageClassification.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ).to(_A )
lowerCAmelCase : Union[str, Any] = self.default_image_processor
lowerCAmelCase : Union[str, Any] = prepare_img()
lowerCAmelCase : Optional[Any] = preprocessor(images=_A , return_tensors='''pt''' ).to(_A )
# forward pass
with torch.no_grad():
lowerCAmelCase : List[Any] = model(**_A )
# verify the logits
lowerCAmelCase : int = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , _A )
lowerCAmelCase : Optional[int] = torch.tensor([0.9_996, 0.1_966, -0.4_386] ).to(_A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _A , atol=1E-4 ) )
| 364
|
"""simple docstring"""
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr )
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('''The input array is not a square matrix''' )
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape) )
    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size] )
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
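# Worked example for maxpooling (illustrative values, not part of the original module):
# with size=2 and stride=2 each non-overlapping 2x2 window collapses to its maximum, so
# maxpooling([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], size=2, stride=2)
# returns [[6., 8.], [14., 16.]].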
def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr )
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('''The input array is not a square matrix''' )
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape) )
    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size] ) )
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
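# Worked example for avgpooling (illustrative values): on the same 4x4 matrix with
# size=2 and stride=2, each window average is truncated by int(), so the result is
# [[3., 5.], [11., 13.]] (e.g. int((1 + 2 + 5 + 6) / 4) == 3).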
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='''avgpooling''', verbose=True)
# Loading the image
    image = Image.open('''path_to_image''')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 314
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case__ : Any = logging.get_logger(__name__)
snake_case__ : Dict = {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json''',
'''google/bigbird-roberta-large''': '''https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json''',
'''google/bigbird-base-trivia-itc''': '''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json''',
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class snake_case_( a__ ):
__UpperCamelCase = "big_bird"
def __init__( self : Any , UpperCamelCase_ : List[Any]=5_0_3_5_8 , UpperCamelCase_ : Dict=7_6_8 , UpperCamelCase_ : Dict=1_2 , UpperCamelCase_ : Tuple=1_2 , UpperCamelCase_ : Optional[Any]=3_0_7_2 , UpperCamelCase_ : Dict="gelu_new" , UpperCamelCase_ : List[Any]=0.1 , UpperCamelCase_ : Any=0.1 , UpperCamelCase_ : int=4_0_9_6 , UpperCamelCase_ : Optional[Any]=2 , UpperCamelCase_ : List[str]=0.02 , UpperCamelCase_ : Optional[int]=1E-12 , UpperCamelCase_ : Dict=True , UpperCamelCase_ : Any=0 , UpperCamelCase_ : List[str]=1 , UpperCamelCase_ : Optional[Any]=2 , UpperCamelCase_ : int=6_6 , UpperCamelCase_ : Optional[Any]="block_sparse" , UpperCamelCase_ : List[Any]=True , UpperCamelCase_ : Any=False , UpperCamelCase_ : Optional[Any]=6_4 , UpperCamelCase_ : Tuple=3 , UpperCamelCase_ : Union[str, Any]=None , **UpperCamelCase_ : Optional[int] , ):
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , sep_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
lowerCAmelCase : int = vocab_size
lowerCAmelCase : str = max_position_embeddings
lowerCAmelCase : List[str] = hidden_size
lowerCAmelCase : Tuple = num_hidden_layers
lowerCAmelCase : Union[str, Any] = num_attention_heads
lowerCAmelCase : List[str] = intermediate_size
lowerCAmelCase : int = hidden_act
lowerCAmelCase : List[str] = hidden_dropout_prob
lowerCAmelCase : str = attention_probs_dropout_prob
lowerCAmelCase : Dict = initializer_range
lowerCAmelCase : Union[str, Any] = type_vocab_size
lowerCAmelCase : List[str] = layer_norm_eps
lowerCAmelCase : Optional[int] = use_cache
lowerCAmelCase : Tuple = rescale_embeddings
lowerCAmelCase : Tuple = attention_type
lowerCAmelCase : Optional[int] = use_bias
lowerCAmelCase : Dict = block_size
lowerCAmelCase : Any = num_random_blocks
lowerCAmelCase : List[Any] = classifier_dropout
class snake_case_( a__ ):
@property
def lowerCamelCase__ ( self : Optional[int] ):
if self.task == "multiple-choice":
lowerCAmelCase : Any = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
lowerCAmelCase : Dict = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
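# Illustrative instantiation of the config above (a sketch; the class corresponds to
# transformers' BigBirdConfig): keeping attention_type="block_sparse" with block_size=64
# and num_random_blocks=3 mirrors the defaults shown in __init__, while
# attention_type="original_full" falls back to dense attention.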
| 365
|
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline( DiffusionPipeline ):
    def __init__( self , unet , scheduler ):
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config )
        self.register_modules(unet=unet , scheduler=scheduler )
@torch.no_grad()
    def __call__( self , batch_size : int = 1 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , eta : float = 0.0 , num_inference_steps : int = 50 , use_clipped_model_output : Optional[bool] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , ):
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size , int ):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
        if isinstance(generator , list ) and len(generator ) != batch_size:
            raise ValueError(
                F'''You have passed a list of generators of length {len(generator )}, but requested an effective batch'''
                F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
        image = randn_tensor(image_shape , generator=generator , device=self.device , dtype=self.unet.dtype )
        # set step values
        self.scheduler.set_timesteps(num_inference_steps )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            model_output = self.unet(image , t ).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output , t , image , eta=eta , use_clipped_model_output=use_clipped_model_output , generator=generator ).prev_sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
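# Minimal usage sketch (a sketch, not from the original file; it assumes a trained
# `unet` and a compatible `scheduler` instance are already available):
#     pipeline = DDIMPipeline(unet=unet, scheduler=scheduler)
#     images = pipeline(batch_size=1, num_inference_steps=50, eta=0.0).images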
| 314
| 0
|
"""simple docstring"""
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class snake_case_:
@property
def lowerCamelCase__ ( self : Union[str, Any] ):
return self.get_dummy_input()
@property
def lowerCamelCase__ ( self : int ):
if self.block_type == "down":
return (4, 3_2, 1_6, 1_6)
elif self.block_type == "mid":
return (4, 3_2, 3_2, 3_2)
elif self.block_type == "up":
return (4, 3_2, 6_4, 6_4)
raise ValueError(F'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' )
def lowerCamelCase__ ( self : str , UpperCamelCase_ : List[str]=True , UpperCamelCase_ : int=False , UpperCamelCase_ : str=False , UpperCamelCase_ : Union[str, Any]=False , ):
lowerCAmelCase : List[Any] = 4
lowerCAmelCase : List[Any] = 3_2
lowerCAmelCase : Optional[int] = (3_2, 3_2)
lowerCAmelCase : Optional[int] = torch.manual_seed(0 )
lowerCAmelCase : Optional[int] = torch.device(UpperCamelCase_ )
lowerCAmelCase : Any = (batch_size, num_channels) + sizes
lowerCAmelCase : Optional[int] = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = {'''hidden_states''': hidden_states}
if include_temb:
lowerCAmelCase : List[str] = 1_2_8
lowerCAmelCase : Union[str, Any] = randn_tensor((batch_size, temb_channels) , generator=UpperCamelCase_ , device=UpperCamelCase_ )
if include_res_hidden_states_tuple:
lowerCAmelCase : List[str] = torch.manual_seed(1 )
lowerCAmelCase : List[Any] = (randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=UpperCamelCase_ ),)
if include_encoder_hidden_states:
lowerCAmelCase : str = floats_tensor((batch_size, 3_2, 3_2) ).to(UpperCamelCase_ )
if include_skip_sample:
lowerCAmelCase : Any = randn_tensor(((batch_size, 3) + sizes) , generator=UpperCamelCase_ , device=UpperCamelCase_ )
return dummy_input
def lowerCamelCase__ ( self : Any ):
lowerCAmelCase : List[str] = {
'''in_channels''': 3_2,
'''out_channels''': 3_2,
'''temb_channels''': 1_2_8,
}
if self.block_type == "up":
lowerCAmelCase : List[str] = 3_2
if self.block_type == "mid":
init_dict.pop('''out_channels''' )
lowerCAmelCase : Optional[int] = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : str ):
lowerCAmelCase, lowerCAmelCase : Optional[Any] = self.prepare_init_args_and_inputs_for_common()
lowerCAmelCase : Optional[Any] = self.block_class(**UpperCamelCase_ )
unet_block.to(UpperCamelCase_ )
unet_block.eval()
with torch.no_grad():
lowerCAmelCase : List[Any] = unet_block(**UpperCamelCase_ )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
lowerCAmelCase : Optional[int] = output[0]
self.assertEqual(output.shape , self.output_shape )
lowerCAmelCase : Optional[Any] = output[0, -1, -3:, -3:]
lowerCAmelCase : Optional[int] = torch.tensor(UpperCamelCase_ ).to(UpperCamelCase_ )
assert torch_all_close(output_slice.flatten() , UpperCamelCase_ , atol=5E-3 )
@unittest.skipIf(torch_device == '''mps''' , '''Training is not supported in mps''' )
def lowerCamelCase__ ( self : str ):
lowerCAmelCase, lowerCAmelCase : str = self.prepare_init_args_and_inputs_for_common()
lowerCAmelCase : Tuple = self.block_class(**UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.train()
lowerCAmelCase : Union[str, Any] = model(**UpperCamelCase_ )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
lowerCAmelCase : Dict = output[0]
lowerCAmelCase : List[Any] = torch.device(UpperCamelCase_ )
lowerCAmelCase : int = randn_tensor(output.shape , device=UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = torch.nn.functional.mse_loss(UpperCamelCase_ , UpperCamelCase_ )
loss.backward()
| 366
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'''configuration_plbart''': ['''PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PLBartConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_plbart'''] = ['''PLBartTokenizer''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_plbart'''] = [
'''PLBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PLBartForCausalLM''',
'''PLBartForConditionalGeneration''',
'''PLBartForSequenceClassification''',
'''PLBartModel''',
'''PLBartPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 314
| 0
|
"""simple docstring"""
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class snake_case_( UpperCAmelCase_ ):
__UpperCamelCase = (DDIMParallelScheduler,)
__UpperCamelCase = (("""eta""", 0.0), ("""num_inference_steps""", 50))
def lowerCamelCase__ ( self : Any , **UpperCamelCase_ : int ):
lowerCAmelCase : List[Any] = {
'''num_train_timesteps''': 1_0_0_0,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''clip_sample''': True,
}
config.update(**__lowercase )
return config
def lowerCamelCase__ ( self : List[Any] , **UpperCamelCase_ : str ):
lowerCAmelCase : Tuple = self.scheduler_classes[0]
lowerCAmelCase : Optional[int] = self.get_scheduler_config(**__lowercase )
lowerCAmelCase : int = scheduler_class(**__lowercase )
lowerCAmelCase : List[Any] = 1_0, 0.0
lowerCAmelCase : List[str] = self.dummy_model()
lowerCAmelCase : Union[str, Any] = self.dummy_sample_deter
scheduler.set_timesteps(__lowercase )
for t in scheduler.timesteps:
lowerCAmelCase : Optional[int] = model(__lowercase , __lowercase )
lowerCAmelCase : Any = scheduler.step(__lowercase , __lowercase , __lowercase , __lowercase ).prev_sample
return sample
def lowerCamelCase__ ( self : Dict ):
for timesteps in [1_0_0, 5_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=__lowercase )
def lowerCamelCase__ ( self : List[Any] ):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=__lowercase )
lowerCAmelCase : Optional[int] = self.scheduler_classes[0]
lowerCAmelCase : Any = self.get_scheduler_config(steps_offset=1 )
lowerCAmelCase : Union[str, Any] = scheduler_class(**__lowercase )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([8_0_1, 6_0_1, 4_0_1, 2_0_1, 1] ) )
def lowerCamelCase__ ( self : str ):
for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=__lowercase , beta_end=__lowercase )
def lowerCamelCase__ ( self : Optional[Any] ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__lowercase )
def lowerCamelCase__ ( self : int ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__lowercase )
def lowerCamelCase__ ( self : List[str] ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__lowercase )
def lowerCamelCase__ ( self : List[str] ):
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=__lowercase )
def lowerCamelCase__ ( self : int ):
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=__lowercase )
def lowerCamelCase__ ( self : Optional[int] ):
self.check_over_configs(thresholding=__lowercase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=__lowercase , prediction_type=__lowercase , sample_max_value=__lowercase , )
def lowerCamelCase__ ( self : Tuple ):
for t in [1, 1_0, 4_9]:
self.check_over_forward(time_step=__lowercase )
def lowerCamelCase__ ( self : Any ):
for t, num_inference_steps in zip([1, 1_0, 5_0] , [1_0, 5_0, 5_0_0] ):
self.check_over_forward(time_step=__lowercase , num_inference_steps=__lowercase )
def lowerCamelCase__ ( self : Optional[Any] ):
for t, eta in zip([1, 1_0, 4_9] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=__lowercase , eta=__lowercase )
def lowerCamelCase__ ( self : List[str] ):
lowerCAmelCase : List[str] = self.scheduler_classes[0]
lowerCAmelCase : Tuple = self.get_scheduler_config()
lowerCAmelCase : str = scheduler_class(**__lowercase )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_2_0 , 4_0_0 ) - 0.14_771 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_8_0 , 9_6_0 ) - 0.32_460 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 , 4_8_6 ) - 0.00_979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 , 9_9_8 ) - 0.02 ) ) < 1E-5
def lowerCamelCase__ ( self : Optional[int] ):
lowerCAmelCase : List[Any] = self.scheduler_classes[0]
lowerCAmelCase : Tuple = self.get_scheduler_config()
lowerCAmelCase : List[Any] = scheduler_class(**__lowercase )
lowerCAmelCase : Union[str, Any] = 1_0, 0.0
scheduler.set_timesteps(__lowercase )
lowerCAmelCase : List[str] = self.dummy_model()
lowerCAmelCase : List[str] = self.dummy_sample_deter
lowerCAmelCase : int = self.dummy_sample_deter + 0.1
lowerCAmelCase : Any = self.dummy_sample_deter - 0.1
lowerCAmelCase : Tuple = samplea.shape[0]
lowerCAmelCase : List[str] = torch.stack([samplea, samplea, samplea] , dim=0 )
lowerCAmelCase : Dict = torch.arange(__lowercase )[0:3, None].repeat(1 , __lowercase )
lowerCAmelCase : Optional[Any] = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
lowerCAmelCase : Union[str, Any] = scheduler.batch_step_no_noise(__lowercase , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , __lowercase )
lowerCAmelCase : Optional[Any] = torch.sum(torch.abs(__lowercase ) )
lowerCAmelCase : Optional[Any] = torch.mean(torch.abs(__lowercase ) )
assert abs(result_sum.item() - 1_1_4_7.7_9_0_4 ) < 1E-2
assert abs(result_mean.item() - 0.4_982 ) < 1E-3
def lowerCamelCase__ ( self : Any ):
lowerCAmelCase : List[Any] = self.full_loop()
lowerCAmelCase : Union[str, Any] = torch.sum(torch.abs(__lowercase ) )
lowerCAmelCase : int = torch.mean(torch.abs(__lowercase ) )
assert abs(result_sum.item() - 1_7_2.0_0_6_7 ) < 1E-2
assert abs(result_mean.item() - 0.223_967 ) < 1E-3
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : str = self.full_loop(prediction_type='''v_prediction''' )
lowerCAmelCase : Dict = torch.sum(torch.abs(__lowercase ) )
lowerCAmelCase : str = torch.mean(torch.abs(__lowercase ) )
assert abs(result_sum.item() - 52.5_302 ) < 1E-2
assert abs(result_mean.item() - 0.0_684 ) < 1E-3
def lowerCamelCase__ ( self : Dict ):
# We specify different beta, so that the first alpha is 0.99
lowerCAmelCase : List[Any] = self.full_loop(set_alpha_to_one=__lowercase , beta_start=0.01 )
lowerCAmelCase : Any = torch.sum(torch.abs(__lowercase ) )
lowerCAmelCase : List[str] = torch.mean(torch.abs(__lowercase ) )
assert abs(result_sum.item() - 1_4_9.8_2_9_5 ) < 1E-2
assert abs(result_mean.item() - 0.1_951 ) < 1E-3
def lowerCamelCase__ ( self : List[Any] ):
# We specify different beta, so that the first alpha is 0.99
lowerCAmelCase : Optional[int] = self.full_loop(set_alpha_to_one=__lowercase , beta_start=0.01 )
lowerCAmelCase : Union[str, Any] = torch.sum(torch.abs(__lowercase ) )
lowerCAmelCase : Any = torch.mean(torch.abs(__lowercase ) )
assert abs(result_sum.item() - 1_4_9.0_7_8_4 ) < 1E-2
assert abs(result_mean.item() - 0.1_941 ) < 1E-3
| 367
|
"""simple docstring"""
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = '''
import os
'''
IMPORT_IN_FUNCTION = '''
def foo():
import os
return False
'''
DEEPLY_NESTED_IMPORT = '''
def foo():
def bar():
if True:
import os
return False
return bar()
'''
TOP_LEVEL_TRY_IMPORT = '''
import os
try:
import bar
except ImportError:
raise ValueError()
'''
TRY_IMPORT_IN_FUNCTION = '''
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
'''
MULTIPLE_EXCEPTS_IMPORT = '''
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
'''
EXCEPT_AS_IMPORT = '''
import os
try:
import bar
except ImportError as e:
raise ValueError()
'''
GENERIC_EXCEPT_IMPORT = '''
import os
try:
import bar
except:
raise ValueError()
'''
MULTILINE_TRY_IMPORT = '''
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
'''
MULTILINE_BOTH_IMPORT = '''
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
'''
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize('''case''' , CASES )
def test_import_parsing ( tmp_path , case ):
    tmp_file_path = os.path.join(tmp_path , '''test_file.py''' )
    with open(tmp_file_path , '''w''' ) as _tmp_file:
        _tmp_file.write(case )
    parsed_imports = get_imports(tmp_file_path )
    assert parsed_imports == ["os"]
| 314
| 0
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
snake_case__ : Tuple = logging.get_logger(__name__)
def make_batched ( videos ):
    if isinstance(videos , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
        return videos
    elif isinstance(videos , (list, tuple) ) and is_valid_image(videos[0] ):
        return [videos]
    elif is_valid_image(videos ):
        return [[videos]]
    raise ValueError(f'''Could not make batched video from {videos}''' )
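# Behaviour of make_batched, for reference: a single image becomes [[image]] (one video
# with one frame), a flat list of frames becomes [frames] (one video), and a list of
# videos is returned unchanged.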
class snake_case_( UpperCamelCase__ ):
__UpperCamelCase = ["""pixel_values"""]
def __init__( self : Tuple , UpperCamelCase_ : bool = True , UpperCamelCase_ : Dict[str, int] = None , UpperCamelCase_ : PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase_ : bool = True , UpperCamelCase_ : Dict[str, int] = None , UpperCamelCase_ : bool = True , UpperCamelCase_ : Union[int, float] = 1 / 2_5_5 , UpperCamelCase_ : bool = True , UpperCamelCase_ : bool = True , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , **UpperCamelCase_ : str , ):
super().__init__(**__a )
lowerCAmelCase : Dict = size if size is not None else {'''shortest_edge''': 2_5_6}
lowerCAmelCase : List[Any] = get_size_dict(__a , default_to_square=__a )
lowerCAmelCase : Optional[Any] = crop_size if crop_size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
lowerCAmelCase : Optional[Any] = get_size_dict(__a , param_name='''crop_size''' )
lowerCAmelCase : Tuple = do_resize
lowerCAmelCase : Any = size
lowerCAmelCase : List[str] = do_center_crop
lowerCAmelCase : Dict = crop_size
lowerCAmelCase : Tuple = resample
lowerCAmelCase : List[str] = do_rescale
lowerCAmelCase : Union[str, Any] = rescale_factor
lowerCAmelCase : int = offset
lowerCAmelCase : Optional[Any] = do_normalize
lowerCAmelCase : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCAmelCase : str = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : Dict[str, int] , UpperCamelCase_ : PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_ : Optional[Any] , ):
lowerCAmelCase : str = get_size_dict(__a , default_to_square=__a )
if "shortest_edge" in size:
lowerCAmelCase : Union[str, Any] = get_resize_output_image_size(__a , size['''shortest_edge'''] , default_to_square=__a )
elif "height" in size and "width" in size:
lowerCAmelCase : Tuple = (size['''height'''], size['''width'''])
else:
raise ValueError(F'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
return resize(__a , size=__a , resample=__a , data_format=__a , **__a )
def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : Dict[str, int] , UpperCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_ : str , ):
lowerCAmelCase : int = get_size_dict(__a )
if "height" not in size or "width" not in size:
raise ValueError(F'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(__a , size=(size['''height'''], size['''width''']) , data_format=__a , **__a )
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : Union[int, float] , UpperCamelCase_ : bool = True , UpperCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_ : Dict , ):
lowerCAmelCase : Dict = image.astype(np.floataa )
if offset:
lowerCAmelCase : int = image - (scale / 2)
return rescale(__a , scale=__a , data_format=__a , **__a )
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : Union[float, List[float]] , UpperCamelCase_ : Union[float, List[float]] , UpperCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_ : Any , ):
return normalize(__a , mean=__a , std=__a , data_format=__a , **__a )
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : ImageInput , UpperCamelCase_ : bool = None , UpperCamelCase_ : Dict[str, int] = None , UpperCamelCase_ : PILImageResampling = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : Dict[str, int] = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : float = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , UpperCamelCase_ : Optional[ChannelDimension] = ChannelDimension.FIRST , ):
        if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
if offset and not do_rescale:
raise ValueError('''For offset, do_rescale must also be set to True.''' )
# All transformations expect numpy arrays.
lowerCAmelCase : List[str] = to_numpy_array(__a )
if do_resize:
lowerCAmelCase : Optional[Any] = self.resize(image=__a , size=__a , resample=__a )
if do_center_crop:
lowerCAmelCase : List[str] = self.center_crop(__a , size=__a )
if do_rescale:
lowerCAmelCase : Optional[int] = self.rescale(image=__a , scale=__a , offset=__a )
if do_normalize:
lowerCAmelCase : str = self.normalize(image=__a , mean=__a , std=__a )
lowerCAmelCase : Tuple = to_channel_dimension_format(__a , __a )
return image
def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : ImageInput , UpperCamelCase_ : bool = None , UpperCamelCase_ : Dict[str, int] = None , UpperCamelCase_ : PILImageResampling = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : Dict[str, int] = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : float = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , UpperCamelCase_ : Optional[Union[str, TensorType]] = None , UpperCamelCase_ : ChannelDimension = ChannelDimension.FIRST , **UpperCamelCase_ : Any , ):
lowerCAmelCase : List[str] = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase : Union[str, Any] = resample if resample is not None else self.resample
lowerCAmelCase : Any = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCAmelCase : int = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase : int = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase : Optional[Any] = offset if offset is not None else self.offset
lowerCAmelCase : List[Any] = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase : List[Any] = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase : Union[str, Any] = image_std if image_std is not None else self.image_std
lowerCAmelCase : Union[str, Any] = size if size is not None else self.size
lowerCAmelCase : Optional[int] = get_size_dict(__a , default_to_square=__a )
lowerCAmelCase : int = crop_size if crop_size is not None else self.crop_size
lowerCAmelCase : Optional[Any] = get_size_dict(__a , param_name='''crop_size''' )
if not valid_images(__a ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
lowerCAmelCase : Dict = make_batched(__a )
lowerCAmelCase : Dict = [
[
self._preprocess_image(
image=__a , do_resize=__a , size=__a , resample=__a , do_center_crop=__a , crop_size=__a , do_rescale=__a , rescale_factor=__a , offset=__a , do_normalize=__a , image_mean=__a , image_std=__a , data_format=__a , )
for img in video
]
for video in videos
]
lowerCAmelCase : Tuple = {'''pixel_values''': videos}
return BatchFeature(data=__a , tensor_type=__a )
| 368
|
"""simple docstring"""
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp( tf.keras.optimizers.schedules.LearningRateSchedule ):
    def __init__( self , initial_learning_rate : float , decay_schedule_fn : Callable , warmup_steps : int , power : float = 1.0 , name : str = None , ):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name
    def __call__( self , step ):
        with tf.name_scope(self.name or '''WarmUp''' ) as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step , tf.float32 )
            warmup_steps_float = tf.cast(self.warmup_steps , tf.float32 )
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done , self.power )
            return tf.cond(
                global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=name , )
    def get_config( self ):
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
def create_optimizer ( init_lr : float , num_train_steps : int , num_warmup_steps : int , min_lr_ratio : float = 0.0 , adam_beta1 : float = 0.9 , adam_beta2 : float = 0.999 , adam_epsilon : float = 1E-8 , adam_clipnorm : Optional[float] = None , adam_global_clipnorm : Optional[float] = None , weight_decay_rate : float = 0.0 , power : float = 1.0 , include_in_weight_decay : Optional[List[str]] = None , ):
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=power , )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr , decay_schedule_fn=lr_schedule , warmup_steps=num_warmup_steps , )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule , weight_decay_rate=weight_decay_rate , beta_1=adam_beta1 , beta_2=adam_beta2 , epsilon=adam_epsilon , clipnorm=adam_clipnorm , global_clipnorm=adam_global_clipnorm , exclude_from_weight_decay=['''LayerNorm''', '''layer_norm''', '''bias'''] , include_in_weight_decay=include_in_weight_decay , )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule , beta_1=adam_beta1 , beta_2=adam_beta2 , epsilon=adam_epsilon , clipnorm=adam_clipnorm , global_clipnorm=adam_global_clipnorm , )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
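# Worked example of the warmup math above (illustrative numbers): with init_lr=1e-3,
# num_warmup_steps=100 and power=1.0, the learning rate at step 50 is
# 1e-3 * (50 / 100) ** 1.0 == 5e-4; once past step 100 the wrapped PolynomialDecay
# schedule takes over.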
class AdamWeightDecay( Adam ):
    def __init__( self , learning_rate : Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001 , beta_1 : float = 0.9 , beta_2 : float = 0.999 , epsilon : float = 1E-7 , amsgrad : bool = False , weight_decay_rate : float = 0.0 , include_in_weight_decay : Optional[List[str]] = None , exclude_from_weight_decay : Optional[List[str]] = None , name : str = "AdamWeightDecay" , **kwargs , ):
        super().__init__(learning_rate , beta_1 , beta_2 , epsilon , amsgrad , name , **kwargs )
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay
    @classmethod
    def from_config( cls , config ):
        custom_objects = {'''WarmUp''': WarmUp}
        return super(AdamWeightDecay , cls ).from_config(config , custom_objects=custom_objects )
    def _prepare_local( self , var_device , var_dtype , apply_state ):
        super(AdamWeightDecay , self )._prepare_local(var_device , var_dtype , apply_state )
        apply_state[(var_device, var_dtype)]['''weight_decay_rate'''] = tf.constant(
            self.weight_decay_rate , name='''adam_weight_decay_rate''' )
    def _decay_weights_op( self , var , learning_rate , apply_state ):
        do_decay = self._do_use_weight_decay(var.name )
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['''weight_decay_rate'''] , use_locking=self._use_locking , )
        return tf.no_op()
    def apply_gradients( self , grads_and_vars , name=None , **kwargs ):
        grads, tvars = list(zip(*grads_and_vars ) )
        return super(AdamWeightDecay , self ).apply_gradients(zip(grads , tvars ) , name=name , **kwargs )
    def _get_lr( self , var_device , var_dtype , apply_state ):
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}
        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype) )
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device , var_dtype )
            apply_state[(var_device, var_dtype)] = coefficients
        return coefficients["lr_t"], {"apply_state": apply_state}
    def _resource_apply_dense( self , grad , var , apply_state=None ):
        lr_t, kwargs = self._get_lr(var.device , var.dtype.base_dtype , apply_state )
        decay = self._decay_weights_op(var , lr_t , apply_state )
        with tf.control_dependencies([decay] ):
            return super(AdamWeightDecay , self )._resource_apply_dense(grad , var , **kwargs )
    def _resource_apply_sparse( self , grad , var , indices , apply_state=None ):
        lr_t, kwargs = self._get_lr(var.device , var.dtype.base_dtype , apply_state )
        decay = self._decay_weights_op(var , lr_t , apply_state )
        with tf.control_dependencies([decay] ):
            return super(AdamWeightDecay , self )._resource_apply_sparse(grad , var , indices , **kwargs )
    def get_config( self ):
        config = super().get_config()
        config.update({'''weight_decay_rate''': self.weight_decay_rate} )
        return config
    def _do_use_weight_decay( self , param_name ):
        if self.weight_decay_rate == 0:
            return False
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r , param_name ) is not None:
                    return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r , param_name ) is not None:
                    return False
        return True
class GradientAccumulator( object ):
    def __init__( self ):
        self._gradients = []
        self._accum_steps = None
    @property
    def step( self ):
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0 , dtype=tf.int64 ) , trainable=False , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
        return self._accum_steps.value()
    @property
    def gradients( self ):
        if not self._gradients:
            raise ValueError('''The accumulator should be called first to initialize the gradients''' )
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
    def __call__( self , gradients ):
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient ) , trainable=False , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ] )
        if len(gradients ) != len(self._gradients ):
            raise ValueError(F'''Expected {len(self._gradients )} gradients, but got {len(gradients )}''' )
        for accum_gradient, gradient in zip(self._gradients , gradients ):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient )
        self._accum_steps.assign_add(1 )
    def reset( self ):
        if not self._gradients:
            return
        self._accum_steps.assign(0 )
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient ) )
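# Usage sketch for GradientAccumulator (illustrative; `batches`, `compute_loss`,
# `accumulation_steps`, `model` and `optimizer` are hypothetical placeholders):
#     accumulator = GradientAccumulator()
#     for step, batch in enumerate(batches):
#         with tf.GradientTape() as tape:
#             loss = compute_loss(model, batch)
#         accumulator(tape.gradient(loss, model.trainable_variables))
#         if (step + 1) % accumulation_steps == 0:
#             optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
#             accumulator.reset()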
| 314
| 0
|
"""simple docstring"""
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def mobius ( number : int ):
    factors = prime_factors(number )
    if is_square_free(factors ):
        return -1 if len(factors ) % 2 else 1
    return 0
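# Quick checks for mobius (values verified by hand): mobius(7) == -1 (one prime factor),
# mobius(10) == 1 (10 = 2 * 5, an even number of prime factors), and mobius(12) == 0
# (12 = 2**2 * 3 is not square-free).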
if __name__ == "__main__":
import doctest
doctest.testmod()
| 369
|
"""simple docstring"""
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = '''src/transformers'''
# Matches is_xxx_available()
_re_backend = re.compile(R'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(R'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(R'''\s+"\S*":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(R'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(R'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(R'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(R'''^\s+"([^"]+)",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(R'''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(R'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
_re_try = re.compile(R'''^\s*try:''')
# Catches a line with else:
_re_else = re.compile(R'''^\s*else:''')
def find_backend ( line ):
    if _re_test_backend.search(line ) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line )]
    backends.sort()
    return "_and_".join(backends )
def parse_init ( init_file ):
    with open(init_file , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        lines = f.readlines()
    line_index = 0
    while line_index < len(lines ) and not lines[line_index].startswith('''_import_structure = {''' ):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(lines ):
        return None
    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line ):
            content = _re_one_line_import_struct.search(line ).groups()[0]
            imports = re.findall(R'''\[([^\]]+)\]''' , content )
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] )
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line )
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(obj ) > 0]
            objects.extend(imports )
        elif line.startswith(''' ''' * 8 + '''"''' ):
            objects.append(line[9:-3] )
        line_index += 1
    import_dict_objects = {'''none''': objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith('''if TYPE_CHECKING''' ):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line ) is not None:
                    objects.append(_re_import_struct_add_one.search(line ).groups()[0] )
                elif _re_import_struct_add_many.search(line ) is not None:
                    imports = _re_import_struct_add_many.search(line ).groups()[0].split(''', ''' )
                    imports = [obj[1:-1] for obj in imports if len(obj ) > 0]
                    objects.extend(imports )
                elif _re_between_brackets.search(line ) is not None:
                    imports = _re_between_brackets.search(line ).groups()[0].split(''', ''' )
                    imports = [obj[1:-1] for obj in imports if len(obj ) > 0]
                    objects.extend(imports )
                elif _re_quote_object.search(line ) is not None:
                    objects.append(_re_quote_object.search(line ).groups()[0] )
                elif line.startswith(''' ''' * 8 + '''"''' ):
                    objects.append(line[9:-3] )
                elif line.startswith(''' ''' * 12 + '''"''' ):
                    objects.append(line[13:-3] )
                line_index += 1
            import_dict_objects[backend] = objects
        else:
            line_index += 1
    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines )
        and find_backend(lines[line_index] ) is None
        and not lines[line_index].startswith('''else''' )
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line )
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
        elif line.startswith(''' ''' * 8 ):
            objects.append(line[8:-2] )
        line_index += 1
    type_hint_objects = {'''none''': objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines ):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
                elif line.startswith(''' ''' * 12 ):
                    objects.append(line[12:-2] )
                line_index += 1
            type_hint_objects[backend] = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
def analyze_results ( import_dict_objects , type_hint_objects ):
    def find_duplicates(seq ):
        return [k for k, v in collections.Counter(seq ).items() if v > 1]
    if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key] )
        if duplicate_imports:
            errors.append(f'''Duplicate _import_structure definitions for: {duplicate_imports}''' )
        duplicate_type_hints = find_duplicates(type_hint_objects[key] )
        if duplicate_type_hints:
            errors.append(f'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' )
        if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
            name = '''base imports''' if key == '''none''' else f'''{key} backend'''
            errors.append(f'''Differences for {name}:''' )
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f''' {a} in TYPE_HINT but not in _import_structure.''' )
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f''' {a} in _import_structure but not in TYPE_HINT.''' )
    return errors
def check_all_inits ( ):
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS ):
        if "__init__.py" in files:
            fname = os.path.join(root , '''__init__.py''' )
            objects = parse_init(fname )
            if objects is not None:
                errors = analyze_results(*objects )
                if len(errors ) > 0:
                    errors[0] = f'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
                    failures.append('''\n'''.join(errors ) )
    if len(failures ) > 0:
        raise ValueError('''\n\n'''.join(failures ) )
def get_transformers_submodules ( ):
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS ):
        for folder in directories:
            # Ignore private modules
            if folder.startswith('''_''' ):
                directories.remove(folder )
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path ) / folder).glob('''*.py''' ) ) ) == 0:
                continue
            short_path = str((Path(path ) / folder).relative_to(PATH_TO_TRANSFORMERS ) )
            submodule = short_path.replace(os.path.sep , '''.''' )
            submodules.append(submodule )
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path ) / fname).relative_to(PATH_TO_TRANSFORMERS ) )
            submodule = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' )
            if len(submodule.split('''.''' ) ) == 1:
                submodules.append(submodule )
    return submodules
IGNORE_SUBMODULES = [
'''convert_pytorch_checkpoint_to_tf2''',
'''modeling_flax_pytorch_utils''',
]
def check_submodules ( ):
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        '''transformers''' , os.path.join(PATH_TO_TRANSFORMERS , '''__init__.py''' ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered ) > 0:
        list_of_modules = '''\n'''.join(f'''- {module}''' for module in module_not_registered )
        raise ValueError(
            '''The following submodules are not properly registered in the main init of Transformers:\n'''
            f'''{list_of_modules}\n'''
            '''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 314
| 0
|
"""simple docstring"""
def max_product_subarray ( numbers ):
    if not numbers:
        return 0
    if not isinstance(numbers , (list, tuple) ) or not all(
        isinstance(number , int ) for number in numbers ):
        raise ValueError('''numbers must be an iterable of integers''' )
    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1 , len(numbers ) ):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number , max_till_now * number )
        min_till_now = min(number , min_till_now * number )
        # update the maximum product found till now
        max_prod = max(max_prod , max_till_now )
    return max_prod
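# Examples for max_product_subarray (verified by hand): max_product_subarray([2, 3, -2, 4])
# returns 6 (from the subarray [2, 3]), and max_product_subarray([-2, 0, -1]) returns 0,
# since every subarray containing both negative numbers also contains the zero.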
| 370
|
"""simple docstring"""
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_ ( state_dict ):
    ignore_keys = [
        '''encoder.version''',
        '''decoder.version''',
        '''model.encoder.version''',
        '''model.decoder.version''',
        '''decoder.output_projection.weight''',
        '''_float_tensor''',
        '''encoder.embed_positions._float_tensor''',
        '''decoder.embed_positions._float_tensor''',
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def make_linear_from_emb ( emb ):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys ( state_dict , expert_idx=None ):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace('''moe_layer.experts.0''' , f'''ffn.experts.expert_{expert_idx}''' )
            else:
                key = key.replace('''moe_layer.experts.''' , '''ffn.experts.expert_''' )
        if "gate" in key:
            key = key.replace('''.moe_layer.gate.wg''' , '''.ffn.router.classifier''' )
        if "fc2" in key and "experts" not in key:
            key = key.replace('''.fc2.''' , '''.ffn.fc2.''' )
        if "fc1" in key and "experts" not in key:
            key = key.replace('''.fc1.''' , '''.ffn.fc1.''' )
        if ".encoder_attn." in key:
            key = key.replace('''.encoder_attn.''' , '''.cross_attention.''' )
        if "encoder_attn_layer_norm" in key:
            key = key.replace('''encoder_attn_layer_norm''' , '''cross_attention_layer_norm''' )
        if "final_layer_norm" in key:
            key = key.replace('''final_layer_norm''' , '''ff_layer_norm''' )
        new_dict[key] = state_dict[old_key]
    return new_dict
def shard_on_the_fly ( switch_checkpoint_path , dump_path , num_experts , dtype , weights_name : str = WEIGHTS_NAME ):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path , exist_ok=True )
    for expert in range(num_experts ):
        expert_path = switch_checkpoint_path + f'''-rank-{expert}.pt'''
        if os.path.isfile(expert_path ):
            expert_state = torch.load(expert_path )['''model''']
            remove_ignore_keys_(expert_state )
            expert_state = rename_fairseq_keys(expert_state , expert )
            save_path = os.path.join(
                dump_path , weights_name.replace('''.bin''' , f'''-{len(sharded_state_dicts )+1:05d}-of-???.bin''' ) )
            torch.save(expert_state , save_path )
            sharded_state_dicts.append(expert_state.keys() )
            total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
                expert_state[list(expert_state )[0]].dtype )
    # Add the last block
    save_path = os.path.join(dump_path , weights_name.replace('''.bin''' , f'''-{len(sharded_state_dicts )+1:05d}-of-???.bin''' ) )
    shared_weights = torch.load(switch_checkpoint_path + '''-shared.pt''' )['''model''']
    remove_ignore_keys_(shared_weights )
    shared_weights = rename_fairseq_keys(shared_weights , None )
    shared_weights['''shared.weight'''] = shared_weights['''decoder.embed_tokens.weight''']
    sharded_state_dicts.append(shared_weights.keys() )
    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts ) == 1:
        save_path = os.path.join(dump_path , weights_name )
        torch.save(shared_weights , save_path )
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights , save_path )
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts ):
        shard_file = weights_name.replace('''.bin''' , f'''-{idx+1:05d}-of-{len(sharded_state_dicts ):05d}.bin''' )
        temp_filename = os.path.join(dump_path , weights_name.replace('''.bin''' , f'''-{idx+1:05d}-of-???.bin''' ) )
        os.rename(temp_filename , os.path.join(dump_path , shard_file ) )
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {'''total_size''': total_size}
    index = {'''metadata''': metadata, '''weight_map''': weight_map}
    with open(os.path.join(dump_path , WEIGHTS_INDEX_NAME ) , '''w''' , encoding='''utf-8''' ) as f:
        content = json.dumps(index , indent=2 , sort_keys=True ) + '''\n'''
        f.write(content )
    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--nllb_moe_checkpoint_path''',
default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000''',
type=str,
required=False,
help='''Path to a directory containing a folder per layer. Follows the original Google format.''',
)
parser.add_argument('''--dtype''', default='''float32''', type=str, required=False, help='''dtype of the saved model''')
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b''',
type=str,
required=False,
help='''Path to the output pytorch model.''',
)
snake_case__ : List[str] = parser.parse_args()
snake_case__ , snake_case__ : Tuple = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
snake_case__ : str = NllbMoeConfig.from_pretrained(
'''facebook/nllb-200-3.3B''', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
snake_case__ : Any = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print('''Done''')
model.save_pretrained(args.pytorch_dump_folder_path)
| 314
| 0
|
"""simple docstring"""
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='''%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s''',
datefmt='''%Y-%m-%d %H:%M:%S''',
level=os.environ.get('''LOGLEVEL''', '''INFO''').upper(),
stream=sys.stdout,
)
snake_case__ : Tuple = logging.getLogger(__name__)
snake_case__ : Union[str, Any] = {'''facebook/bart-base''': BartForConditionalGeneration}
snake_case__ : List[str] = {'''facebook/bart-base''': BartTokenizer}
def _snake_case ( ):
lowerCAmelCase : List[str] = argparse.ArgumentParser(description='''Export Bart model + Beam Search to ONNX graph.''' )
parser.add_argument(
'''--validation_file''' , type=__lowerCAmelCase , default=__lowerCAmelCase , help='''A csv or a json file containing the validation data.''' )
parser.add_argument(
'''--max_length''' , type=__lowerCAmelCase , default=5 , help='''The maximum total input sequence length after tokenization.''' , )
parser.add_argument(
'''--num_beams''' , type=__lowerCAmelCase , default=__lowerCAmelCase , help=(
'''Number of beams to use for evaluation. This argument will be '''
'''passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'''
) , )
parser.add_argument(
'''--model_name_or_path''' , type=__lowerCAmelCase , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=__lowerCAmelCase , )
parser.add_argument(
'''--config_name''' , type=__lowerCAmelCase , default=__lowerCAmelCase , help='''Pretrained config name or path if not the same as model_name''' , )
parser.add_argument(
'''--device''' , type=__lowerCAmelCase , default='''cpu''' , help='''Device where the model will be run''' , )
parser.add_argument('''--output_file_path''' , type=__lowerCAmelCase , default=__lowerCAmelCase , help='''Where to store the final ONNX file.''' )
lowerCAmelCase : str = parser.parse_args()
return args
def _snake_case ( _snake_case : Union[str, Any] , _snake_case : str="cpu" ):
lowerCAmelCase : int = model_dict[model_name].from_pretrained(__lowerCAmelCase ).to(__lowerCAmelCase )
lowerCAmelCase : Optional[Any] = tokenizer_dict[model_name].from_pretrained(__lowerCAmelCase )
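# for bart-base, reset generation-related config defaults so no extra
# constraints are baked into the exported beam-search graph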
if model_name in ["facebook/bart-base"]:
lowerCAmelCase : Optional[Any] = 0
lowerCAmelCase : Dict = None
lowerCAmelCase : List[Any] = 0
return huggingface_model, tokenizer
def _snake_case ( _snake_case : List[str] , _snake_case : Optional[int] , _snake_case : Any , _snake_case : Optional[Any] , _snake_case : Dict ):
model.eval()
lowerCAmelCase : Optional[int] = None
lowerCAmelCase : int = torch.jit.script(BARTBeamSearchGenerator(__lowerCAmelCase ) )
with torch.no_grad():
lowerCAmelCase : Optional[Any] = '''My friends are cool but they eat too many carbs.'''
lowerCAmelCase : Optional[int] = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1024 , return_tensors='''pt''' ).to(model.device )
lowerCAmelCase : Union[str, Any] = model.generate(
inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , num_beams=__lowerCAmelCase , max_length=__lowerCAmelCase , early_stopping=__lowerCAmelCase , decoder_start_token_id=model.config.decoder_start_token_id , )
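# export the scripted beam-search wrapper to ONNX; the dynamic axes keep the
# batch and sequence dimensions flexible in the resulting graph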
torch.onnx.export(
__lowerCAmelCase , (
inputs['''input_ids'''],
inputs['''attention_mask'''],
num_beams,
max_length,
model.config.decoder_start_token_id,
) , __lowerCAmelCase , opset_version=14 , input_names=['''input_ids''', '''attention_mask''', '''num_beams''', '''max_length''', '''decoder_start_token_id'''] , output_names=['''output_ids'''] , dynamic_axes={
'''input_ids''': {0: '''batch''', 1: '''seq'''},
'''output_ids''': {0: '''batch''', 1: '''seq_out'''},
} , example_outputs=__lowerCAmelCase , )
logger.info('''Model exported to {}'''.format(__lowerCAmelCase ) )
lowerCAmelCase : Union[str, Any] = remove_dup_initializers(os.path.abspath(__lowerCAmelCase ) )
logger.info('''Deduplicated and optimized model written to {}'''.format(__lowerCAmelCase ) )
lowerCAmelCase : Dict = onnxruntime.InferenceSession(__lowerCAmelCase )
lowerCAmelCase : Dict = ort_sess.run(
__lowerCAmelCase , {
'''input_ids''': inputs['''input_ids'''].cpu().numpy(),
'''attention_mask''': inputs['''attention_mask'''].cpu().numpy(),
'''num_beams''': np.array(__lowerCAmelCase ),
'''max_length''': np.array(__lowerCAmelCase ),
'''decoder_start_token_id''': np.array(model.config.decoder_start_token_id ),
} , )
np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1E-3 , atol=1E-3 )
logger.info('''Model outputs from torch and ONNX Runtime are similar.''' )
logger.info('''Success.''' )
def _snake_case ( ):
lowerCAmelCase : List[Any] = parse_args()
lowerCAmelCase : List[Any] = 5
lowerCAmelCase : int = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
lowerCAmelCase : List[Any] = torch.device(args.device )
lowerCAmelCase, lowerCAmelCase : Tuple = load_model_tokenizer(args.model_name_or_path , __lowerCAmelCase )
if model.config.decoder_start_token_id is None:
raise ValueError('''Make sure that `config.decoder_start_token_id` is correctly defined''' )
model.to(__lowerCAmelCase )
if args.max_length:
lowerCAmelCase : Any = args.max_length
if args.num_beams:
lowerCAmelCase : Dict = args.num_beams
if args.output_file_path:
lowerCAmelCase : Optional[Any] = args.output_file_path
else:
lowerCAmelCase : Dict = '''BART.onnx'''
logger.info('''Exporting model to ONNX''' )
export_and_validate_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if __name__ == "__main__":
main()
| 371
|
"""simple docstring"""
from math import sqrt
def _snake_case ( _snake_case : int ):
assert isinstance(_snake_case , _snake_case ) and (
number >= 0
), "'number' must been an int and positive"
lowerCAmelCase : Dict = True
# 0 and 1 are none primes.
if number <= 1:
lowerCAmelCase : Optional[int] = False
for divisor in range(2 , int(round(sqrt(_snake_case ) ) ) + 1 ):
# if 'number' divisible by 'divisor' then sets 'status'
# of false and break up the loop.
if number % divisor == 0:
lowerCAmelCase : int = False
break
# precondition
assert isinstance(_snake_case , _snake_case ), "'status' must been from type bool"
return status
def _snake_case ( _snake_case : List[str] ):
assert isinstance(_snake_case , _snake_case ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
lowerCAmelCase : Optional[int] = list(range(2 , n + 1 ) )
lowerCAmelCase : Optional[Any] = [] # this list will be returns.
# actual sieve of erathostenes
for i in range(len(_snake_case ) ):
for j in range(i + 1 , len(_snake_case ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
lowerCAmelCase : Any = 0
# filters actual prime numbers.
lowerCAmelCase : Any = [x for x in begin_list if x != 0]
# precondition
assert isinstance(_snake_case , _snake_case ), "'ans' must been from type list"
return ans
def _snake_case ( _snake_case : List[str] ):
assert isinstance(_snake_case , _snake_case ) and (n > 2), "'N' must been an int and > 2"
lowerCAmelCase : Tuple = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2 , n + 1 ):
if is_prime(_snake_case ):
ans.append(_snake_case )
# precondition
assert isinstance(_snake_case , _snake_case ), "'ans' must been from type list"
return ans
def _snake_case ( _snake_case : int ):
assert isinstance(_snake_case , _snake_case ) and number >= 0, "'number' must been an int and >= 0"
lowerCAmelCase : Dict = [] # this list will be returns of the function.
# potential prime number factors.
lowerCAmelCase : Optional[int] = 2
lowerCAmelCase : List[str] = number
if number == 0 or number == 1:
ans.append(_snake_case )
# if 'number' not prime then builds the prime factorization of 'number'
elif not is_prime(_snake_case ):
while quotient != 1:
if is_prime(_snake_case ) and (quotient % factor == 0):
ans.append(_snake_case )
quotient /= factor
else:
factor += 1
else:
ans.append(_snake_case )
# precondition
assert isinstance(_snake_case , _snake_case ), "'ans' must been from type list"
return ans
def _snake_case ( _snake_case : Tuple ):
assert isinstance(_snake_case , _snake_case ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowerCAmelCase : Optional[Any] = 0
# prime factorization of 'number'
lowerCAmelCase : Optional[Any] = prime_factorization(_snake_case )
lowerCAmelCase : Any = max(_snake_case )
# precondition
assert isinstance(_snake_case , _snake_case ), "'ans' must been from type int"
return ans
def _snake_case ( _snake_case : Dict ):
assert isinstance(_snake_case , _snake_case ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowerCAmelCase : int = 0
# prime factorization of 'number'
lowerCAmelCase : List[Any] = prime_factorization(_snake_case )
lowerCAmelCase : Optional[int] = min(_snake_case )
# precondition
assert isinstance(_snake_case , _snake_case ), "'ans' must been from type int"
return ans
def _snake_case ( _snake_case : Union[str, Any] ):
assert isinstance(_snake_case , _snake_case ), "'number' must been an int"
assert isinstance(number % 2 == 0 , _snake_case ), "compare must been from type bool"
return number % 2 == 0
def _snake_case ( _snake_case : List[str] ):
assert isinstance(_snake_case , _snake_case ), "'number' must been an int"
assert isinstance(number % 2 != 0 , _snake_case ), "compare must been from type bool"
return number % 2 != 0
def _snake_case ( _snake_case : Tuple ):
assert (
isinstance(_snake_case , _snake_case ) and (number > 2) and is_even(_snake_case )
), "'number' must been an int, even and > 2"
lowerCAmelCase : List[str] = [] # this list will returned
# creates a list of prime numbers between 2 up to 'number'
lowerCAmelCase : Union[str, Any] = get_prime_numbers(_snake_case )
lowerCAmelCase : Optional[Any] = len(_snake_case )
# run variable for while-loops.
lowerCAmelCase : List[str] = 0
lowerCAmelCase : Tuple = None
# exit variable. for break up the loops
lowerCAmelCase : str = True
while i < len_pn and loop:
lowerCAmelCase : str = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
lowerCAmelCase : Dict = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(_snake_case , _snake_case )
and (len(_snake_case ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
def _snake_case ( _snake_case : Any , _snake_case : Union[str, Any] ):
assert (
isinstance(_snake_case , _snake_case )
and isinstance(_snake_case , _snake_case )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
lowerCAmelCase : Dict = 0
while numbera != 0:
lowerCAmelCase : Union[str, Any] = numbera % numbera
lowerCAmelCase : List[Any] = numbera
lowerCAmelCase : List[Any] = rest
# precondition
assert isinstance(_snake_case , _snake_case ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
def _snake_case ( _snake_case : Optional[Any] , _snake_case : List[Any] ):
assert (
isinstance(_snake_case , _snake_case )
and isinstance(_snake_case , _snake_case )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
lowerCAmelCase : Union[str, Any] = 1 # actual answer that will be return.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
lowerCAmelCase : List[str] = prime_factorization(_snake_case )
lowerCAmelCase : Union[str, Any] = prime_factorization(_snake_case )
elif numbera == 1 or numbera == 1:
lowerCAmelCase : Union[str, Any] = []
lowerCAmelCase : Optional[int] = []
lowerCAmelCase : List[str] = max(_snake_case , _snake_case )
lowerCAmelCase : Dict = 0
lowerCAmelCase : int = 0
lowerCAmelCase : Dict = [] # captured numbers int both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
lowerCAmelCase : List[str] = prime_fac_a.count(_snake_case )
lowerCAmelCase : Any = prime_fac_a.count(_snake_case )
for _ in range(max(_snake_case , _snake_case ) ):
ans *= n
else:
lowerCAmelCase : Union[str, Any] = prime_fac_a.count(_snake_case )
for _ in range(_snake_case ):
ans *= n
done.append(_snake_case )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
lowerCAmelCase : List[Any] = prime_fac_a.count(_snake_case )
for _ in range(_snake_case ):
ans *= n
done.append(_snake_case )
# precondition
assert isinstance(_snake_case , _snake_case ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def _snake_case ( _snake_case : Any ):
assert isinstance(_snake_case , _snake_case ) and (n >= 0), "'number' must been a positive int"
lowerCAmelCase : Optional[int] = 0
lowerCAmelCase : Tuple = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(_snake_case ):
ans += 1
# precondition
assert isinstance(_snake_case , _snake_case ) and is_prime(
_snake_case ), "'ans' must been a prime number and from type int"
return ans
def _snake_case ( _snake_case : Any , _snake_case : Dict ):
assert (
is_prime(_snake_case ) and is_prime(_snake_case ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
lowerCAmelCase : Optional[int] = p_number_a + 1 # jump to the next number
lowerCAmelCase : str = [] # this list will be returns.
# if number is not prime then
# fetch the next prime number.
while not is_prime(_snake_case ):
number += 1
while number < p_number_a:
ans.append(_snake_case )
number += 1
# fetch the next prime number.
while not is_prime(_snake_case ):
number += 1
# precondition
assert (
isinstance(_snake_case , _snake_case )
and ans[0] != p_number_a
and ans[len(_snake_case ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
def _snake_case ( _snake_case : List[Any] ):
assert isinstance(_snake_case , _snake_case ) and (n >= 1), "'n' must been an int and >= 1"
lowerCAmelCase : Optional[Any] = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(_snake_case )
# precondition
assert ans[0] == 1 and ans[len(_snake_case ) - 1] == n, "Error in function get_divisors(...)"
return ans
def _snake_case ( _snake_case : Union[str, Any] ):
assert isinstance(_snake_case , _snake_case ) and (
number > 1
), "'number' must been an int and >= 1"
lowerCAmelCase : int = get_divisors(_snake_case )
# precondition
assert (
isinstance(_snake_case , _snake_case )
and (divisors[0] == 1)
and (divisors[len(_snake_case ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
def _snake_case ( _snake_case : List[str] , _snake_case : Optional[Any] ):
assert (
isinstance(_snake_case , _snake_case )
and isinstance(_snake_case , _snake_case )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
lowerCAmelCase : int = gcd(abs(_snake_case ) , abs(_snake_case ) )
# precondition
assert (
isinstance(_snake_case , _snake_case )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def _snake_case ( _snake_case : Optional[int] ):
assert isinstance(_snake_case , _snake_case ) and (n >= 0), "'n' must been a int and >= 0"
lowerCAmelCase : Optional[Any] = 1 # this will be return.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def _snake_case ( _snake_case : Union[str, Any] ):
assert isinstance(_snake_case , _snake_case ) and (n >= 0), "'n' must been an int and >= 0"
lowerCAmelCase : Dict = 0
lowerCAmelCase : Dict = 1
lowerCAmelCase : Tuple = 1 # this will be return
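# iterative Fibonacci: each pass advances the (previous, current) pair once,
# n - 1 times in total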
for _ in range(n - 1 ):
lowerCAmelCase : int = ans
ans += fiba
lowerCAmelCase : Optional[Any] = tmp
return ans
| 314
| 0
|
"""simple docstring"""
from math import factorial
def _snake_case ( _snake_case : int , _snake_case : int ):
# If either of the conditions are true, the function is being asked
# to calculate a factorial of a negative number, which is not possible
if n < k or k < 0:
raise ValueError('''Please enter positive integers for n and k where n >= k''' )
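# C(n, k) = n! / (k! * (n - k)!)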
return factorial(_snake_case ) // (factorial(_snake_case ) * factorial(n - k ))
if __name__ == "__main__":
print(
'''The number of five-card hands possible from a standard''',
f"""fifty-two card deck is: {combinations(52, 5)}\n""",
)
print(
'''If a class of 40 students must be arranged into groups of''',
f"""4 for group projects, there are {combinations(40, 4)} ways""",
'''to arrange them.\n''',
)
print(
'''If 10 teams are competing in a Formula One race, there''',
f"""are {combinations(10, 3)} ways that first, second and""",
'''third place can be awarded.''',
)
| 350
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case__ : Any = logging.get_logger(__name__)
snake_case__ : Any = {
'''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class snake_case_( a__ ):
__UpperCamelCase = '''vit_msn'''
def __init__( self : Dict , UpperCamelCase_ : str=7_6_8 , UpperCamelCase_ : List[Any]=1_2 , UpperCamelCase_ : Optional[Any]=1_2 , UpperCamelCase_ : str=3_0_7_2 , UpperCamelCase_ : List[Any]="gelu" , UpperCamelCase_ : List[Any]=0.0 , UpperCamelCase_ : Any=0.0 , UpperCamelCase_ : List[str]=0.02 , UpperCamelCase_ : List[Any]=1E-06 , UpperCamelCase_ : Tuple=2_2_4 , UpperCamelCase_ : Union[str, Any]=1_6 , UpperCamelCase_ : List[Any]=3 , UpperCamelCase_ : Any=True , **UpperCamelCase_ : Union[str, Any] , ):
super().__init__(**UpperCamelCase_ )
lowerCAmelCase : Any = hidden_size
lowerCAmelCase : Tuple = num_hidden_layers
lowerCAmelCase : List[Any] = num_attention_heads
lowerCAmelCase : Any = intermediate_size
lowerCAmelCase : Dict = hidden_act
lowerCAmelCase : int = hidden_dropout_prob
lowerCAmelCase : List[str] = attention_probs_dropout_prob
lowerCAmelCase : Tuple = initializer_range
lowerCAmelCase : Union[str, Any] = layer_norm_eps
lowerCAmelCase : Tuple = image_size
lowerCAmelCase : List[str] = patch_size
lowerCAmelCase : int = num_channels
lowerCAmelCase : Optional[int] = qkv_bias
| 314
| 0
|
"""simple docstring"""
import collections
import importlib.util
import os
import re
from pathlib import Path
snake_case__ : Union[str, Any] = '''src/transformers'''
# Matches is_xxx_available()
snake_case__ : int = re.compile(R'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
snake_case__ : List[str] = re.compile(R'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
snake_case__ : List[str] = re.compile(R'''\s+"\S*":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
snake_case__ : Optional[Any] = re.compile(R'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
snake_case__ : Union[str, Any] = re.compile(R'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
snake_case__ : Any = re.compile(R'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
snake_case__ : Union[str, Any] = re.compile(R'''^\s+"([^"]+)",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
snake_case__ : Optional[Any] = re.compile(R'''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
snake_case__ : Optional[Any] = re.compile(R'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
snake_case__ : Dict = re.compile(R'''^\s*try:''')
# Catches a line with else:
snake_case__ : int = re.compile(R'''^\s*else:''')
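# Extracts the backend name(s) from an `if not is_xxx_available()` line,
# joining multiple backends with "_and_"; returns None for non-backend lines.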
def _snake_case ( _snake_case : Optional[Any] ):
if _re_test_backend.search(_snake_case ) is None:
return None
lowerCAmelCase : Tuple = [b[0] for b in _re_backend.findall(_snake_case )]
backends.sort()
return "_and_".join(_snake_case )
def _snake_case ( _snake_case : Optional[Any] ):
with open(_snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
lowerCAmelCase : int = f.readlines()
lowerCAmelCase : Tuple = 0
while line_index < len(_snake_case ) and not lines[line_index].startswith('''_import_structure = {''' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(_snake_case ):
return None
# First grab the objects without a specific backend in _import_structure
lowerCAmelCase : List[str] = []
while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None:
lowerCAmelCase : List[str] = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(_snake_case ):
lowerCAmelCase : str = _re_one_line_import_struct.search(_snake_case ).groups()[0]
lowerCAmelCase : Dict = re.findall(R'''\[([^\]]+)\]''' , _snake_case )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] )
line_index += 1
continue
lowerCAmelCase : Tuple = _re_import_struct_key_value.search(_snake_case )
if single_line_import_search is not None:
lowerCAmelCase : str = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(_snake_case ) > 0]
objects.extend(_snake_case )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
line_index += 1
lowerCAmelCase : str = {'''none''': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('''if TYPE_CHECKING''' ):
# If the line is an if not is_backend_available, we grab all objects associated.
lowerCAmelCase : Tuple = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowerCAmelCase : List[Any] = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowerCAmelCase : Union[str, Any] = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ):
lowerCAmelCase : int = lines[line_index]
if _re_import_struct_add_one.search(_snake_case ) is not None:
objects.append(_re_import_struct_add_one.search(_snake_case ).groups()[0] )
elif _re_import_struct_add_many.search(_snake_case ) is not None:
lowerCAmelCase : str = _re_import_struct_add_many.search(_snake_case ).groups()[0].split(''', ''' )
lowerCAmelCase : Dict = [obj[1:-1] for obj in imports if len(_snake_case ) > 0]
objects.extend(_snake_case )
elif _re_between_brackets.search(_snake_case ) is not None:
lowerCAmelCase : Any = _re_between_brackets.search(_snake_case ).groups()[0].split(''', ''' )
lowerCAmelCase : List[str] = [obj[1:-1] for obj in imports if len(_snake_case ) > 0]
objects.extend(_snake_case )
elif _re_quote_object.search(_snake_case ) is not None:
objects.append(_re_quote_object.search(_snake_case ).groups()[0] )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
elif line.startswith(''' ''' * 12 + '''"''' ):
objects.append(line[13:-3] )
line_index += 1
lowerCAmelCase : List[Any] = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
lowerCAmelCase : Optional[Any] = []
while (
line_index < len(_snake_case )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('''else''' )
):
lowerCAmelCase : Optional[Any] = lines[line_index]
lowerCAmelCase : List[Any] = _re_import.search(_snake_case )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 8 ):
objects.append(line[8:-2] )
line_index += 1
lowerCAmelCase : List[str] = {'''none''': objects}
# Let's continue with backend-specific objects
while line_index < len(_snake_case ):
# If the line is an if is_backend_available, we grab all objects associated.
lowerCAmelCase : str = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowerCAmelCase : int = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowerCAmelCase : str = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ):
lowerCAmelCase : Any = lines[line_index]
lowerCAmelCase : Tuple = _re_import.search(_snake_case )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 12 ):
objects.append(line[12:-2] )
line_index += 1
lowerCAmelCase : Optional[Any] = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def _snake_case ( _snake_case : Dict , _snake_case : Optional[Any] ):
def find_duplicates(_snake_case : Tuple ):
return [k for k, v in collections.Counter(_snake_case ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
lowerCAmelCase : Any = []
for key in import_dict_objects.keys():
lowerCAmelCase : int = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(f'''Duplicate _import_structure definitions for: {duplicate_imports}''' )
lowerCAmelCase : Optional[Any] = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(f'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
lowerCAmelCase : Tuple = '''base imports''' if key == '''none''' else f'''{key} backend'''
errors.append(f'''Differences for {name}:''' )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(f''' {a} in TYPE_HINT but not in _import_structure.''' )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(f''' {a} in _import_structure but not in TYPE_HINT.''' )
return errors
def _snake_case ( ):
lowerCAmelCase : int = []
for root, _, files in os.walk(_snake_case ):
if "__init__.py" in files:
lowerCAmelCase : List[Any] = os.path.join(_snake_case , '''__init__.py''' )
lowerCAmelCase : List[Any] = parse_init(_snake_case )
if objects is not None:
lowerCAmelCase : Tuple = analyze_results(*_snake_case )
if len(_snake_case ) > 0:
lowerCAmelCase : int = f'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
failures.append('''\n'''.join(_snake_case ) )
if len(_snake_case ) > 0:
raise ValueError('''\n\n'''.join(_snake_case ) )
def _snake_case ( ):
lowerCAmelCase : Optional[Any] = []
for path, directories, files in os.walk(_snake_case ):
for folder in directories:
# Ignore private modules
if folder.startswith('''_''' ):
directories.remove(_snake_case )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(_snake_case ) / folder).glob('''*.py''' ) ) ) == 0:
continue
lowerCAmelCase : Dict = str((Path(_snake_case ) / folder).relative_to(_snake_case ) )
lowerCAmelCase : Optional[int] = short_path.replace(os.path.sep , '''.''' )
submodules.append(_snake_case )
for fname in files:
if fname == "__init__.py":
continue
lowerCAmelCase : Optional[Any] = str((Path(_snake_case ) / fname).relative_to(_snake_case ) )
lowerCAmelCase : Any = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' )
if len(submodule.split('''.''' ) ) == 1:
submodules.append(_snake_case )
return submodules
snake_case__ : str = [
'''convert_pytorch_checkpoint_to_tf2''',
'''modeling_flax_pytorch_utils''',
]
def _snake_case ( ):
# This is to make sure the transformers module imported is the one in the repo.
lowerCAmelCase : Any = importlib.util.spec_from_file_location(
'''transformers''' , os.path.join(_snake_case , '''__init__.py''' ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
lowerCAmelCase : Any = spec.loader.load_module()
lowerCAmelCase : Optional[Any] = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(_snake_case ) > 0:
lowerCAmelCase : Dict = '''\n'''.join(f'''- {module}''' for module in module_not_registered )
raise ValueError(
'''The following submodules are not properly registered in the main init of Transformers:\n'''
f'''{list_of_modules}\n'''
'''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 351
|
"""simple docstring"""
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
snake_case__ : Optional[Any] = logging.getLogger(__name__)
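# Writes the repo id, head commit sha and active branch to git_log.json so the
# exact code state of a run can be recovered later.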
def _snake_case ( _snake_case : str ):
lowerCAmelCase : Tuple = git.Repo(search_parent_directories=_snake_case )
lowerCAmelCase : Optional[int] = {
'''repo_id''': str(_snake_case ),
'''repo_sha''': str(repo.head.object.hexsha ),
'''repo_branch''': str(repo.active_branch ),
}
with open(os.path.join(_snake_case , '''git_log.json''' ) , '''w''' ) as f:
json.dump(_snake_case , _snake_case , indent=4 )
def _snake_case ( _snake_case : Any ):
if params.n_gpu <= 0:
lowerCAmelCase : Dict = 0
lowerCAmelCase : Optional[int] = -1
lowerCAmelCase : Dict = True
lowerCAmelCase : int = False
return
assert torch.cuda.is_available()
logger.info('''Initializing GPUs''' )
if params.n_gpu > 1:
assert params.local_rank != -1
lowerCAmelCase : str = int(os.environ['''WORLD_SIZE'''] )
lowerCAmelCase : Optional[int] = int(os.environ['''N_GPU_NODE'''] )
lowerCAmelCase : int = int(os.environ['''RANK'''] )
# number of nodes / node ID
lowerCAmelCase : Dict = params.world_size // params.n_gpu_per_node
lowerCAmelCase : int = params.global_rank // params.n_gpu_per_node
lowerCAmelCase : str = True
assert params.n_nodes == int(os.environ['''N_NODES'''] )
assert params.node_id == int(os.environ['''NODE_RANK'''] )
# local job (single GPU)
else:
assert params.local_rank == -1
lowerCAmelCase : List[Any] = 1
lowerCAmelCase : List[Any] = 0
lowerCAmelCase : Optional[int] = 0
lowerCAmelCase : Any = 0
lowerCAmelCase : Any = 1
lowerCAmelCase : Any = 1
lowerCAmelCase : Dict = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
lowerCAmelCase : Tuple = params.node_id == 0 and params.local_rank == 0
lowerCAmelCase : List[Any] = params.n_nodes > 1
# summary
lowerCAmelCase : Optional[int] = f'''--- Global rank: {params.global_rank} - '''
logger.info(PREFIX + '''Number of nodes: %i''' % params.n_nodes )
logger.info(PREFIX + '''Node ID : %i''' % params.node_id )
logger.info(PREFIX + '''Local rank : %i''' % params.local_rank )
logger.info(PREFIX + '''World size : %i''' % params.world_size )
logger.info(PREFIX + '''GPUs per node : %i''' % params.n_gpu_per_node )
logger.info(PREFIX + '''Master : %s''' % str(params.is_master ) )
logger.info(PREFIX + '''Multi-node : %s''' % str(params.multi_node ) )
logger.info(PREFIX + '''Multi-GPU : %s''' % str(params.multi_gpu ) )
logger.info(PREFIX + '''Hostname : %s''' % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info('''Initializing PyTorch distributed''' )
torch.distributed.init_process_group(
init_method='''env://''' , backend='''nccl''' , )
def _snake_case ( _snake_case : Optional[int] ):
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
| 314
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
snake_case__ : Union[str, Any] = {
'''configuration_blip''': [
'''BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlipConfig''',
'''BlipTextConfig''',
'''BlipVisionConfig''',
],
'''processing_blip''': ['''BlipProcessor'''],
}
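# optional backends: the image processor needs vision, the model classes need
# torch and tensorflow respectively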
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : List[Any] = ['''BlipImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : Optional[Any] = [
'''BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlipModel''',
'''BlipPreTrainedModel''',
'''BlipForConditionalGeneration''',
'''BlipForQuestionAnswering''',
'''BlipVisionModel''',
'''BlipTextModel''',
'''BlipForImageTextRetrieval''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : Any = [
'''TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBlipModel''',
'''TFBlipPreTrainedModel''',
'''TFBlipForConditionalGeneration''',
'''TFBlipForQuestionAnswering''',
'''TFBlipVisionModel''',
'''TFBlipTextModel''',
'''TFBlipForImageTextRetrieval''',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
snake_case__ : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 352
|
"""simple docstring"""
def _snake_case ( _snake_case : int ):
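# Sylvester's sequence: s(1) = 2 and s(n) = s(n-1) * (s(n-1) - 1) + 1,
# computed below as lower * upper + 1 with lower = s(n-1) - 1 and upper = s(n-1)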
assert isinstance(_snake_case , _snake_case ), f'''The input value of [n={number}] is not an integer'''
if number == 1:
return 2
elif number < 1:
lowerCAmelCase : Tuple = f'''The input value of [n={number}] has to be > 0'''
raise ValueError(_snake_case )
else:
lowerCAmelCase : str = sylvester(number - 1 )
lowerCAmelCase : Optional[Any] = num - 1
lowerCAmelCase : Optional[Any] = num
return lower * upper + 1
if __name__ == "__main__":
print(f"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
| 314
| 0
|
"""simple docstring"""
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
snake_case__ : List[str] = logging.get_logger(__name__)
snake_case__ : List[Any] = {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/config.json''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/config.json''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/config.json''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/config.json''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/config.json''',
}
class snake_case_( a__ ):
__UpperCamelCase = '''t5'''
__UpperCamelCase = ['''past_key_values''']
__UpperCamelCase = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__( self : Tuple , UpperCamelCase_ : Any=3_2_1_2_8 , UpperCamelCase_ : Union[str, Any]=5_1_2 , UpperCamelCase_ : Tuple=6_4 , UpperCamelCase_ : Optional[int]=2_0_4_8 , UpperCamelCase_ : str=6 , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Optional[Any]=8 , UpperCamelCase_ : str=3_2 , UpperCamelCase_ : str=1_2_8 , UpperCamelCase_ : Any=0.1 , UpperCamelCase_ : Tuple=1E-6 , UpperCamelCase_ : str=1.0 , UpperCamelCase_ : Optional[Any]="relu" , UpperCamelCase_ : Optional[Any]=True , UpperCamelCase_ : Optional[Any]=True , UpperCamelCase_ : Optional[Any]=0 , UpperCamelCase_ : int=1 , **UpperCamelCase_ : List[Any] , ):
lowerCAmelCase : int = vocab_size
lowerCAmelCase : Optional[int] = d_model
lowerCAmelCase : str = d_kv
lowerCAmelCase : Dict = d_ff
lowerCAmelCase : List[Any] = num_layers
lowerCAmelCase : List[str] = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
lowerCAmelCase : Optional[Any] = num_heads
lowerCAmelCase : Optional[Any] = relative_attention_num_buckets
lowerCAmelCase : List[str] = relative_attention_max_distance
lowerCAmelCase : Optional[Any] = dropout_rate
lowerCAmelCase : List[str] = layer_norm_epsilon
lowerCAmelCase : List[str] = initializer_factor
lowerCAmelCase : Union[str, Any] = feed_forward_proj
lowerCAmelCase : Union[str, Any] = use_cache
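# "gated-gelu"-style strings split into (gated flag, activation name);
# a bare name such as "relu" yields a gated flag of False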
lowerCAmelCase : Dict = self.feed_forward_proj.split('''-''' )
lowerCAmelCase : Optional[int] = act_info[-1]
lowerCAmelCase : Optional[int] = act_info[0] == '''gated'''
if len(UpperCamelCase_ ) > 1 and act_info[0] != "gated" or len(UpperCamelCase_ ) > 2:
raise ValueError(
F'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
'''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
'''\'gated-gelu\' or \'relu\'''' )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
lowerCAmelCase : Optional[Any] = '''gelu_new'''
super().__init__(
pad_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , is_encoder_decoder=UpperCamelCase_ , **UpperCamelCase_ , )
class snake_case_( a__ ):
@property
def lowerCamelCase__ ( self : str ):
lowerCAmelCase : Tuple = {
'''input_ids''': {0: '''batch''', 1: '''encoder_sequence'''},
'''attention_mask''': {0: '''batch''', 1: '''encoder_sequence'''},
}
if self.use_past:
lowerCAmelCase : Any = '''past_encoder_sequence + sequence'''
lowerCAmelCase : Optional[Any] = {0: '''batch'''}
lowerCAmelCase : Dict = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
lowerCAmelCase : Any = {0: '''batch''', 1: '''decoder_sequence'''}
lowerCAmelCase : Optional[int] = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(UpperCamelCase_ , direction='''inputs''' )
return common_inputs
@property
def lowerCamelCase__ ( self : Union[str, Any] ):
return 1_3
| 353
|
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def _snake_case ( _snake_case : List[str] ):
lowerCAmelCase : Union[str, Any] = SwinConfig(image_size=192 )
if "base" in model_name:
lowerCAmelCase : Union[str, Any] = 6
lowerCAmelCase : Any = 128
lowerCAmelCase : List[Any] = (2, 2, 18, 2)
lowerCAmelCase : Any = (4, 8, 16, 32)
elif "large" in model_name:
lowerCAmelCase : Tuple = 12
lowerCAmelCase : Dict = 192
lowerCAmelCase : List[str] = (2, 2, 18, 2)
lowerCAmelCase : Union[str, Any] = (6, 12, 24, 48)
else:
raise ValueError('''Model not supported, only supports base and large variants''' )
lowerCAmelCase : Optional[int] = window_size
lowerCAmelCase : Any = embed_dim
lowerCAmelCase : Optional[Any] = depths
lowerCAmelCase : int = num_heads
return config
def _snake_case ( _snake_case : Union[str, Any] ):
if "encoder.mask_token" in name:
lowerCAmelCase : Dict = name.replace('''encoder.mask_token''' , '''embeddings.mask_token''' )
if "encoder.patch_embed.proj" in name:
lowerCAmelCase : Union[str, Any] = name.replace('''encoder.patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "encoder.patch_embed.norm" in name:
lowerCAmelCase : Optional[Any] = name.replace('''encoder.patch_embed.norm''' , '''embeddings.norm''' )
if "attn.proj" in name:
lowerCAmelCase : Optional[Any] = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
lowerCAmelCase : List[str] = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
lowerCAmelCase : List[str] = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
lowerCAmelCase : Optional[int] = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
lowerCAmelCase : int = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
lowerCAmelCase : Optional[int] = name.replace('''mlp.fc2''' , '''output.dense''' )
if name == "encoder.norm.weight":
lowerCAmelCase : Tuple = '''layernorm.weight'''
if name == "encoder.norm.bias":
lowerCAmelCase : str = '''layernorm.bias'''
if "decoder" in name:
pass
else:
lowerCAmelCase : Optional[Any] = '''swin.''' + name
return name
def _snake_case ( _snake_case : Optional[Any] , _snake_case : Optional[int] ):
for key in orig_state_dict.copy().keys():
lowerCAmelCase : Optional[Any] = orig_state_dict.pop(_snake_case )
if "attn_mask" in key:
pass
elif "qkv" in key:
lowerCAmelCase : List[Any] = key.split('''.''' )
lowerCAmelCase : Dict = int(key_split[2] )
lowerCAmelCase : Optional[Any] = int(key_split[4] )
lowerCAmelCase : List[str] = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
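# split the fused qkv projection: rows [0, dim) are query, [dim, 2*dim) are
# key, and the last dim rows are value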
if "weight" in key:
lowerCAmelCase : Dict = val[:dim, :]
lowerCAmelCase : Dict = val[
dim : dim * 2, :
]
lowerCAmelCase : int = val[-dim:, :]
else:
lowerCAmelCase : str = val[
:dim
]
lowerCAmelCase : List[str] = val[
dim : dim * 2
]
lowerCAmelCase : Optional[Any] = val[
-dim:
]
else:
lowerCAmelCase : str = val
return orig_state_dict
def _snake_case ( _snake_case : List[str] , _snake_case : int , _snake_case : Dict , _snake_case : str ):
lowerCAmelCase : List[str] = torch.load(_snake_case , map_location='''cpu''' )['''model''']
lowerCAmelCase : List[Any] = get_swin_config(_snake_case )
lowerCAmelCase : List[Any] = SwinForMaskedImageModeling(_snake_case )
model.eval()
lowerCAmelCase : int = convert_state_dict(_snake_case , _snake_case )
model.load_state_dict(_snake_case )
lowerCAmelCase : str = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCAmelCase : Union[str, Any] = ViTImageProcessor(size={'''height''': 192, '''width''': 192} )
lowerCAmelCase : Union[str, Any] = Image.open(requests.get(_snake_case , stream=_snake_case ).raw )
lowerCAmelCase : str = image_processor(images=_snake_case , return_tensors='''pt''' )
with torch.no_grad():
lowerCAmelCase : Optional[Any] = model(**_snake_case ).logits
print(outputs.keys() )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_snake_case )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_snake_case )
if push_to_hub:
print(f'''Pushing model and image processor for {model_name} to hub''' )
model.push_to_hub(f'''microsoft/{model_name}''' )
image_processor.push_to_hub(f'''microsoft/{model_name}''' )
if __name__ == "__main__":
snake_case__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''swin-base-simmim-window6-192''',
type=str,
choices=['''swin-base-simmim-window6-192''', '''swin-large-simmim-window12-192'''],
help='''Name of the Swin SimMIM model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''',
default='''/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth''',
type=str,
help='''Path to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
snake_case__ : Dict = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 314
| 0
|
"""simple docstring"""
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def _snake_case ( _snake_case : Dict , _snake_case : Tuple , _snake_case : Optional[int]=0 ):
# Format the message.
if name is None:
lowerCAmelCase : Tuple = None
else:
lowerCAmelCase : Optional[int] = '''.''' * max(0 , spaces - 2 ) + '''# {:''' + str(50 - spaces ) + '''s}'''
lowerCAmelCase : Dict = fmt.format(_snake_case )
# Print and recurse (if needed).
if isinstance(_snake_case , _snake_case ):
if msg is not None:
print(_snake_case )
for k in val.keys():
recursive_print(_snake_case , val[k] , spaces + 2 )
elif isinstance(_snake_case , torch.Tensor ):
print(_snake_case , ''':''' , val.size() )
else:
print(_snake_case , ''':''' , _snake_case )
def _snake_case ( _snake_case : str , _snake_case : int , _snake_case : Any , _snake_case : Optional[Any] , _snake_case : Optional[Any] ):
# Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
# for compatibility with later versions of NVIDIA Megatron-LM.
# The inverse operation is performed inside Megatron-LM to read checkpoints:
# https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
# If param is the weight tensor of the self-attention block, the returned tensor
# will have to be transposed one more time to be read by HuggingFace GPT2.
lowerCAmelCase : Tuple = param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
lowerCAmelCase : Any = (num_heads, hidden_size, num_splits) + input_shape[1:]
lowerCAmelCase : List[str] = param.view(*_snake_case )
lowerCAmelCase : List[str] = param.transpose(0 , 2 )
lowerCAmelCase : Optional[int] = param.transpose(1 , 2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
lowerCAmelCase : Dict = (num_heads, num_splits, hidden_size) + input_shape[1:]
lowerCAmelCase : int = param.view(*_snake_case )
lowerCAmelCase : Tuple = param.transpose(0 , 1 ).contiguous()
lowerCAmelCase : Any = param.view(*_snake_case )
return param
def _snake_case ( _snake_case : Tuple , _snake_case : Any , _snake_case : Union[str, Any] ):
# The converted output model.
lowerCAmelCase : Any = {}
# old versions did not store training args
lowerCAmelCase : str = input_state_dict.get('''args''' , _snake_case )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
lowerCAmelCase : Any = ds_args.padded_vocab_size
lowerCAmelCase : Optional[Any] = ds_args.max_position_embeddings
lowerCAmelCase : Tuple = ds_args.hidden_size
lowerCAmelCase : Any = ds_args.num_layers
lowerCAmelCase : Optional[Any] = ds_args.num_attention_heads
lowerCAmelCase : List[str] = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
lowerCAmelCase : Union[str, Any] = config.n_head
# The hidden_size per head.
lowerCAmelCase : Optional[Any] = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
lowerCAmelCase : int = input_state_dict['''checkpoint_version''']
else:
lowerCAmelCase : Optional[Any] = 0.0
# The model.
lowerCAmelCase : str = input_state_dict['''model''']
# The language model.
lowerCAmelCase : List[str] = model['''language_model''']
# The embeddings.
lowerCAmelCase : List[Any] = lm['''embedding''']
# The word embeddings.
lowerCAmelCase : Optional[Any] = embeddings['''word_embeddings''']['''weight''']
# Truncate the embedding table to vocab_size rows.
lowerCAmelCase : str = word_embeddings[: config.vocab_size, :]
lowerCAmelCase : Tuple = word_embeddings
# The position embeddings.
lowerCAmelCase : str = embeddings['''position_embeddings''']['''weight''']
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
lowerCAmelCase : Any = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
f'''pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match''' )
# Store the position embeddings.
lowerCAmelCase : str = pos_embeddings
# The transformer.
lowerCAmelCase : Tuple = lm['''transformer'''] if '''transformer''' in lm.keys() else lm['''encoder''']
# The regex to extract layer names.
lowerCAmelCase : int = re.compile(r'''layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)''' )
# The simple map of names for "automated" rules.
lowerCAmelCase : List[str] = {
'''attention.dense''': '''.attn.c_proj.''',
'''self_attention.dense''': '''.attn.c_proj.''',
'''mlp.dense_h_to_4h''': '''.mlp.c_fc.''',
'''mlp.dense_4h_to_h''': '''.mlp.c_proj.''',
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
lowerCAmelCase : int = layer_re.match(_snake_case )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
lowerCAmelCase : Any = int(m.group(1 ) )
# The name of the operation.
lowerCAmelCase : List[Any] = m.group(2 )
# Is it a weight or a bias?
lowerCAmelCase : Tuple = m.group(3 )
# The name of the layer.
lowerCAmelCase : Optional[Any] = f'''transformer.h.{layer_idx}'''
# For layernorm(s), simply store the layer norm.
if op_name.endswith('''layernorm''' ):
lowerCAmelCase : List[str] = '''ln_1''' if op_name.startswith('''input''' ) else '''ln_2'''
lowerCAmelCase : Any = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
lowerCAmelCase : Union[str, Any] = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
1 , 1 , _snake_case , _snake_case )
lowerCAmelCase : Optional[int] = causal_mask
# Insert a "dummy" tensor for masked_bias.
lowerCAmelCase : int = torch.tensor(-1E4 , dtype=torch.floataa )
lowerCAmelCase : str = masked_bias
lowerCAmelCase : Any = fix_query_key_value_ordering(_snake_case , _snake_case , 3 , _snake_case , _snake_case )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
lowerCAmelCase : int = out_val.transpose(0 , 1 ).contiguous()
# Store.
lowerCAmelCase : List[Any] = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
lowerCAmelCase : str = fix_query_key_value_ordering(_snake_case , _snake_case , 3 , _snake_case , _snake_case )
# Store. No change of shape.
lowerCAmelCase : int = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
lowerCAmelCase : Union[str, Any] = megatron_to_transformers[op_name]
lowerCAmelCase : str = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
lowerCAmelCase : Dict = megatron_to_transformers[op_name]
lowerCAmelCase : Dict = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
lowerCAmelCase : Tuple = transformer['''final_layernorm.weight''']
lowerCAmelCase : Optional[int] = transformer['''final_layernorm.bias''']
# For LM head, transformers' wants the matrix to weight embeddings.
lowerCAmelCase : List[str] = word_embeddings
# It should be done!
return output_state_dict
def _snake_case ( ):
# Create the argument parser.
lowerCAmelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument('''--print-checkpoint-structure''' , action='''store_true''' )
parser.add_argument(
'''path_to_checkpoint''' , type=_snake_case , help='''Path to the checkpoint file (.zip archive or direct .pt file)''' , )
parser.add_argument(
'''--config_file''' , default='''''' , type=_snake_case , help='''An optional config json file describing the pre-trained model.''' , )
lowerCAmelCase : Optional[int] = parser.parse_args()
# Extract the basename.
lowerCAmelCase : Optional[int] = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(f'''Extracting PyTorch state dictionary from {args.path_to_checkpoint}''' )
if args.path_to_checkpoint.endswith('''.zip''' ):
with zipfile.ZipFile(args.path_to_checkpoint , '''r''' ) as checkpoint:
with checkpoint.open('''release/mp_rank_00/model_optim_rng.pt''' ) as pytorch_dict:
lowerCAmelCase : Optional[int] = torch.load(_snake_case , map_location='''cpu''' )
else:
lowerCAmelCase : int = torch.load(args.path_to_checkpoint , map_location='''cpu''' )
lowerCAmelCase : Tuple = input_state_dict.get('''args''' , _snake_case )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
lowerCAmelCase : Optional[Any] = '''gelu_fast'''
elif ds_args.openai_gelu:
lowerCAmelCase : List[Any] = '''gelu_new'''
else:
lowerCAmelCase : List[Any] = '''gelu'''
else:
# in the very early days this used to be "gelu_new"
lowerCAmelCase : str = '''gelu_new'''
# Spell out all parameters in case the defaults change.
lowerCAmelCase : Union[str, Any] = GPTaConfig(
vocab_size=50257 , n_positions=1024 , n_embd=1024 , n_layer=24 , n_head=16 , n_inner=4096 , activation_function=_snake_case , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , summary_type='''cls_index''' , summary_use_proj=_snake_case , summary_activation=_snake_case , summary_proj_to_labels=_snake_case , summary_first_dropout=0.1 , scale_attn_weights=_snake_case , use_cache=_snake_case , bos_token_id=50256 , eos_token_id=50256 , )
else:
lowerCAmelCase : Dict = GPTaConfig.from_json_file(args.config_file )
lowerCAmelCase : Any = ['''GPT2LMHeadModel''']
# Convert.
print('''Converting''' )
lowerCAmelCase : List[Any] = convert_megatron_checkpoint(_snake_case , _snake_case , _snake_case )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(_snake_case , _snake_case )
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906)
if ds_args is not None:
lowerCAmelCase : int = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
lowerCAmelCase : Union[str, Any] = '''gpt2'''
elif tokenizer_type == "PretrainedFromHF":
lowerCAmelCase : List[str] = ds_args.tokenizer_name_or_path
else:
raise ValueError(f'''Unrecognized tokenizer_type {tokenizer_type}''' )
else:
lowerCAmelCase : Union[str, Any] = '''gpt2'''
lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(_snake_case )
lowerCAmelCase : Any = type(_snake_case ).__name__
lowerCAmelCase : str = tokenizer_class
# Store the config to file.
print('''Saving config''' )
config.save_pretrained(_snake_case )
# Save tokenizer based on args
print(f'''Adding {tokenizer_class} tokenizer files''' )
tokenizer.save_pretrained(_snake_case )
# Store the state_dict to file.
lowerCAmelCase : str = os.path.join(_snake_case , '''pytorch_model.bin''' )
print(f'''Saving checkpoint to "{output_checkpoint_file}"''' )
torch.save(_snake_case , _snake_case )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
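# A minimal usage sketch (paths are hypothetical; assumes this file is saved as
# convert_megatron_gpt2_checkpoint.py). The script takes the checkpoint as a
# positional argument and writes config.json, the tokenizer files and
# pytorch_model.bin into the same directory:
#
#   python convert_megatron_gpt2_checkpoint.py \
#       --print-checkpoint-structure \
#       /path/to/megatron/checkpoint.zip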
| 354
|
"""simple docstring"""
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
snake_case__ : List[str] = logging.get_logger(__name__) # pylint: disable=invalid-name
def _snake_case ( _snake_case : Union[List, PIL.Image.Image, torch.Tensor] ):
warnings.warn(
'''The preprocess method is deprecated and will be removed in a future version. Please'''
''' use VaeImageProcessor.preprocess instead''' , _snake_case , )
if isinstance(_snake_case , torch.Tensor ):
return image
elif isinstance(_snake_case , PIL.Image.Image ):
lowerCAmelCase : Optional[int] = [image]
if isinstance(image[0] , PIL.Image.Image ):
lowerCAmelCase, lowerCAmelCase : int = image[0].size
lowerCAmelCase, lowerCAmelCase : Optional[int] = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
lowerCAmelCase : Union[str, Any] = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image]
lowerCAmelCase : int = np.concatenate(_snake_case , axis=0 )
lowerCAmelCase : Optional[Any] = np.array(_snake_case ).astype(np.floataa ) / 255.0
lowerCAmelCase : List[Any] = image.transpose(0 , 3 , 1 , 2 )
lowerCAmelCase : List[str] = 2.0 * image - 1.0
lowerCAmelCase : List[Any] = torch.from_numpy(_snake_case )
elif isinstance(image[0] , torch.Tensor ):
lowerCAmelCase : Any = torch.cat(_snake_case , dim=0 )
return image
def _snake_case ( _snake_case : Union[List, PIL.Image.Image, torch.Tensor] ):
if isinstance(_snake_case , torch.Tensor ):
return mask
elif isinstance(_snake_case , PIL.Image.Image ):
lowerCAmelCase : str = [mask]
if isinstance(mask[0] , PIL.Image.Image ):
lowerCAmelCase, lowerCAmelCase : int = mask[0].size
lowerCAmelCase, lowerCAmelCase : Dict = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
lowerCAmelCase : List[str] = [np.array(m.convert('''L''' ).resize((w, h) , resample=PIL_INTERPOLATION['''nearest'''] ) )[None, :] for m in mask]
lowerCAmelCase : Optional[int] = np.concatenate(_snake_case , axis=0 )
lowerCAmelCase : Dict = mask.astype(np.floataa ) / 255.0
lowerCAmelCase : List[str] = 0
lowerCAmelCase : Optional[int] = 1
lowerCAmelCase : List[Any] = torch.from_numpy(_snake_case )
elif isinstance(mask[0] , torch.Tensor ):
lowerCAmelCase : Optional[int] = torch.cat(_snake_case , dim=0 )
return mask
class snake_case_( a__ ):
__UpperCamelCase = 42
__UpperCamelCase = 42
def __init__( self : List[Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Optional[Any] ):
super().__init__()
self.register_modules(unet=UpperCamelCase_ , scheduler=UpperCamelCase_ )
@torch.no_grad()
def __call__( self : Union[str, Any] , UpperCamelCase_ : Union[torch.Tensor, PIL.Image.Image] , UpperCamelCase_ : Union[torch.Tensor, PIL.Image.Image] , UpperCamelCase_ : int = 2_5_0 , UpperCamelCase_ : float = 0.0 , UpperCamelCase_ : int = 1_0 , UpperCamelCase_ : int = 1_0 , UpperCamelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_ : Optional[str] = "pil" , UpperCamelCase_ : bool = True , ):
lowerCAmelCase : Optional[Any] = image
lowerCAmelCase : Tuple = _preprocess_image(UpperCamelCase_ )
lowerCAmelCase : int = original_image.to(device=self.device , dtype=self.unet.dtype )
lowerCAmelCase : Optional[Any] = _preprocess_mask(UpperCamelCase_ )
lowerCAmelCase : str = mask_image.to(device=self.device , dtype=self.unet.dtype )
lowerCAmelCase : Union[str, Any] = original_image.shape[0]
# sample gaussian noise to begin the loop
if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and len(UpperCamelCase_ ) != batch_size:
raise ValueError(
F'''You have passed a list of generators of length {len(UpperCamelCase_ )}, but requested an effective batch'''
F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
lowerCAmelCase : Union[str, Any] = original_image.shape
lowerCAmelCase : str = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , self.device )
lowerCAmelCase : Optional[int] = eta
lowerCAmelCase : List[str] = self.scheduler.timesteps[0] + 1
lowerCAmelCase : List[str] = generator[0] if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else generator
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
if t < t_last:
# predict the noise residual
lowerCAmelCase : Union[str, Any] = self.unet(UpperCamelCase_ , UpperCamelCase_ ).sample
# compute previous image: x_t -> x_t-1
lowerCAmelCase : str = self.scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ).prev_sample
else:
# compute the reverse: x_t-1 -> x_t
lowerCAmelCase : Optional[Any] = self.scheduler.undo_step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : List[Any] = t
lowerCAmelCase : int = (image / 2 + 0.5).clamp(0 , 1 )
lowerCAmelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowerCAmelCase : Tuple = self.numpy_to_pil(UpperCamelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCamelCase_ )
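# A minimal usage sketch of the pipeline defined above (it ships in diffusers as
# RePaintPipeline; the checkpoint id is one public DDPM model and the image paths
# are hypothetical). Per the pipeline's convention, mask pixels of 0 mark the
# region to regenerate, while the rest stays consistent with the input image.
import PIL.Image
import torch
from diffusers import RePaintPipeline, RePaintScheduler

scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256", subfolder="scheduler")
pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=scheduler)
original = PIL.Image.open("face.png").convert("RGB")
mask = PIL.Image.open("mask.png").convert("RGB")
result = pipe(
    image=original,
    mask_image=mask,
    num_inference_steps=250,
    jump_length=10,
    jump_n_sample=10,
    generator=torch.Generator().manual_seed(0),
).images[0]
result.save("inpainted.png")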
| 314
| 0
|
"""simple docstring"""
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
snake_case__ : Optional[Any] = logging.getLogger(__name__)
def _snake_case ( _snake_case : str ):
lowerCAmelCase : Tuple = git.Repo(search_parent_directories=_snake_case )
lowerCAmelCase : Optional[int] = {
'''repo_id''': str(_snake_case ),
'''repo_sha''': str(repo.head.object.hexsha ),
'''repo_branch''': str(repo.active_branch ),
}
with open(os.path.join(_snake_case , '''git_log.json''' ) , '''w''' ) as f:
json.dump(_snake_case , _snake_case , indent=4 )
def _snake_case ( _snake_case : Any ):
if params.n_gpu <= 0:
lowerCAmelCase : Dict = 0
lowerCAmelCase : Optional[int] = -1
lowerCAmelCase : Dict = True
lowerCAmelCase : int = False
return
assert torch.cuda.is_available()
logger.info('''Initializing GPUs''' )
if params.n_gpu > 1:
assert params.local_rank != -1
lowerCAmelCase : str = int(os.environ['''WORLD_SIZE'''] )
lowerCAmelCase : Optional[int] = int(os.environ['''N_GPU_NODE'''] )
lowerCAmelCase : int = int(os.environ['''RANK'''] )
# number of nodes / node ID
lowerCAmelCase : Dict = params.world_size // params.n_gpu_per_node
lowerCAmelCase : int = params.global_rank // params.n_gpu_per_node
lowerCAmelCase : str = True
assert params.n_nodes == int(os.environ['''N_NODES'''] )
assert params.node_id == int(os.environ['''NODE_RANK'''] )
# local job (single GPU)
else:
assert params.local_rank == -1
lowerCAmelCase : List[Any] = 1
lowerCAmelCase : List[Any] = 0
lowerCAmelCase : Optional[int] = 0
lowerCAmelCase : Any = 0
lowerCAmelCase : Any = 1
lowerCAmelCase : Any = 1
lowerCAmelCase : Dict = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
lowerCAmelCase : Tuple = params.node_id == 0 and params.local_rank == 0
lowerCAmelCase : List[Any] = params.n_nodes > 1
# summary
lowerCAmelCase : Optional[int] = f'''--- Global rank: {params.global_rank} - '''
logger.info(PREFIX + '''Number of nodes: %i''' % params.n_nodes )
logger.info(PREFIX + '''Node ID : %i''' % params.node_id )
logger.info(PREFIX + '''Local rank : %i''' % params.local_rank )
logger.info(PREFIX + '''World size : %i''' % params.world_size )
logger.info(PREFIX + '''GPUs per node : %i''' % params.n_gpu_per_node )
logger.info(PREFIX + '''Master : %s''' % str(params.is_master ) )
logger.info(PREFIX + '''Multi-node : %s''' % str(params.multi_node ) )
logger.info(PREFIX + '''Multi-GPU : %s''' % str(params.multi_gpu ) )
logger.info(PREFIX + '''Hostname : %s''' % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info('''Initializing PyTorch distributed''' )
torch.distributed.init_process_group(
init_method='''env://''' , backend='''nccl''' , )
def _snake_case ( _snake_case : Optional[int] ):
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
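# A sketch of the environment the multi-GPU branch above expects (all values are
# hypothetical; a launcher such as torchrun / torch.distributed.launch provides
# RANK and WORLD_SIZE, while N_NODES, NODE_RANK and N_GPU_NODE come from the job
# script in this setup; train.py is a placeholder entry point):
#
#   export N_NODES=2 NODE_RANK=0 N_GPU_NODE=4
#   export MASTER_ADDR=10.0.0.1 MASTER_PORT=29500
#   python -m torch.distributed.launch --nproc_per_node=4 --nnodes=2 --node_rank=0 train.py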
| 355
|
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class snake_case_( unittest.TestCase ):
def lowerCamelCase__ ( self : int ):
lowerCAmelCase : str = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCAmelCase : str = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(UpperCamelCase_ )
lowerCAmelCase : int = -1
lowerCAmelCase : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCamelCase_ )
lowerCAmelCase : List[Any] = model.generate(UpperCamelCase_ , max_new_tokens=1_0 , do_sample=UpperCamelCase_ )
lowerCAmelCase : Any = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
lowerCAmelCase : str = TextStreamer(UpperCamelCase_ )
model.generate(UpperCamelCase_ , max_new_tokens=1_0 , do_sample=UpperCamelCase_ , streamer=UpperCamelCase_ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowerCAmelCase : str = cs.out[:-1]
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase : int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCAmelCase : Any = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(UpperCamelCase_ )
lowerCAmelCase : Any = -1
lowerCAmelCase : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCamelCase_ )
lowerCAmelCase : Any = model.generate(UpperCamelCase_ , max_new_tokens=1_0 , do_sample=UpperCamelCase_ )
lowerCAmelCase : Tuple = tokenizer.decode(greedy_ids[0] )
lowerCAmelCase : Dict = TextIteratorStreamer(UpperCamelCase_ )
lowerCAmelCase : str = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer}
lowerCAmelCase : str = Thread(target=model.generate , kwargs=UpperCamelCase_ )
thread.start()
lowerCAmelCase : Optional[Any] = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase__ ( self : str ):
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCAmelCase : Optional[int] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(UpperCamelCase_ )
lowerCAmelCase : Tuple = -1
lowerCAmelCase : Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCamelCase_ )
lowerCAmelCase : List[Any] = model.generate(UpperCamelCase_ , max_new_tokens=1_0 , do_sample=UpperCamelCase_ )
lowerCAmelCase : Any = greedy_ids[:, input_ids.shape[1] :]
lowerCAmelCase : Optional[int] = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
lowerCAmelCase : Tuple = TextStreamer(UpperCamelCase_ , skip_prompt=UpperCamelCase_ )
model.generate(UpperCamelCase_ , max_new_tokens=1_0 , do_sample=UpperCamelCase_ , streamer=UpperCamelCase_ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowerCAmelCase : str = cs.out[:-1]
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase__ ( self : List[Any] ):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained('''distilgpt2''' )
lowerCAmelCase : int = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = -1
lowerCAmelCase : Tuple = torch.ones((1, 5) , device=UpperCamelCase_ ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
lowerCAmelCase : Any = TextStreamer(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
model.generate(UpperCamelCase_ , max_new_tokens=1 , do_sample=UpperCamelCase_ , streamer=UpperCamelCase_ )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
lowerCAmelCase : Any = cs.out[:-1] # Remove the final "\n"
lowerCAmelCase : Tuple = tokenizer(UpperCamelCase_ , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCAmelCase : Optional[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(UpperCamelCase_ )
lowerCAmelCase : str = -1
lowerCAmelCase : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCamelCase_ )
lowerCAmelCase : Optional[int] = TextIteratorStreamer(UpperCamelCase_ , timeout=0.001 )
lowerCAmelCase : str = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer}
lowerCAmelCase : Optional[int] = Thread(target=model.generate , kwargs=UpperCamelCase_ )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(UpperCamelCase_ ):
lowerCAmelCase : List[str] = ''''''
for new_text in streamer:
streamer_text += new_text
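# A minimal non-test sketch of the streaming pattern the tests above exercise
# (the model name is only an example): generate() runs in a background thread and
# the main thread consumes decoded text as it arrives from the TextIteratorStreamer.
from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tok = AutoTokenizer.from_pretrained("distilgpt2")
model = AutoModelForCausalLM.from_pretrained("distilgpt2")
inputs = tok("Streaming is", return_tensors="pt")
streamer = TextIteratorStreamer(tok, skip_prompt=True, skip_special_tokens=True)
Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 20, "streamer": streamer}).start()
for chunk in streamer:
    print(chunk, end="", flush=True)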
| 314
| 0
|
"""simple docstring"""
from __future__ import annotations
snake_case__ : List[str] = list[tuple[int, int]]
snake_case__ : int = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
snake_case__ : Tuple = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right
class snake_case_:
def __init__( self : str , UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : float , UpperCamelCase_ : Node | None , ):
lowerCAmelCase : Tuple = pos_x
lowerCAmelCase : Union[str, Any] = pos_y
lowerCAmelCase : Tuple = (pos_y, pos_x)
lowerCAmelCase : List[str] = goal_x
lowerCAmelCase : Tuple = goal_y
lowerCAmelCase : Tuple = g_cost
lowerCAmelCase : Dict = parent
lowerCAmelCase : Optional[int] = self.calculate_heuristic()
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase : Any = abs(self.pos_x - self.goal_x )
lowerCAmelCase : Any = abs(self.pos_y - self.goal_y )
return dx + dy
def __lt__( self : str , UpperCamelCase_ : Optional[int] ):
return self.f_cost < other.f_cost
class snake_case_:
def __init__( self : int , UpperCamelCase_ : tuple[int, int] , UpperCamelCase_ : tuple[int, int] ):
lowerCAmelCase : Union[str, Any] = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , UpperCamelCase_ )
lowerCAmelCase : List[Any] = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9_9_9_9 , UpperCamelCase_ )
lowerCAmelCase : Tuple = [self.start]
lowerCAmelCase : list[Node] = []
lowerCAmelCase : List[str] = False
def lowerCamelCase__ ( self : Optional[Any] ):
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
lowerCAmelCase : str = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
lowerCAmelCase : Any = True
return self.retrace_path(UpperCamelCase_ )
self.closed_nodes.append(UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = self.get_successors(UpperCamelCase_ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(UpperCamelCase_ )
else:
# retrieve the best current path
lowerCAmelCase : Any = self.open_nodes.pop(self.open_nodes.index(UpperCamelCase_ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(UpperCamelCase_ )
else:
self.open_nodes.append(UpperCamelCase_ )
if not self.reached:
return [self.start.pos]
return None
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Node ):
lowerCAmelCase : Any = []
for action in delta:
lowerCAmelCase : Dict = parent.pos_x + action[1]
lowerCAmelCase : List[Any] = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(grid ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
UpperCamelCase_ , UpperCamelCase_ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , UpperCamelCase_ , ) )
return successors
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : Node | None ):
lowerCAmelCase : List[str] = node
lowerCAmelCase : Any = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
lowerCAmelCase : Optional[int] = current_node.parent
path.reverse()
return path
if __name__ == "__main__":
snake_case__ : Optional[Any] = (0, 0)
snake_case__ : Union[str, Any] = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print('''------''')
snake_case__ : Any = GreedyBestFirst(init, goal)
snake_case__ : List[Any] = greedy_bf.search()
if path:
for pos_x, pos_y in path:
snake_case__ : List[str] = 2
for elem in grid:
print(elem)
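# A tiny standalone check of the Manhattan heuristic the nodes above sort by
# (the coordinates are arbitrary examples): greedy best-first expands whichever
# open node has the smallest estimate, so it is fast but, unlike A*, does not
# guarantee a shortest path.
def manhattan(pos: tuple[int, int], goal: tuple[int, int]) -> int:
    return abs(pos[0] - goal[0]) + abs(pos[1] - goal[1])

assert manhattan((0, 0), (6, 6)) == 12
assert manhattan((4, 1), (6, 6)) == 7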
| 356
|
"""simple docstring"""
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
snake_case__ : Optional[Any] = False
class snake_case_( unittest.TestCase ):
def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : List[Any]=3_2 ):
set_seed(0 )
lowerCAmelCase : Tuple = UNetaDModel(sample_size=UpperCamelCase_ , in_channels=3 , out_channels=3 )
lowerCAmelCase : List[str] = torch.optim.SGD(model.parameters() , lr=0.0_001 )
return model, optimizer
@slow
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : List[str] = '''cpu''' # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
lowerCAmelCase : str = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_start=0.0_001 , beta_end=0.02 , beta_schedule='''linear''' , clip_sample=UpperCamelCase_ , )
lowerCAmelCase : int = DDIMScheduler(
num_train_timesteps=1_0_0_0 , beta_start=0.0_001 , beta_end=0.02 , beta_schedule='''linear''' , clip_sample=UpperCamelCase_ , )
assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
# shared batches for DDPM and DDIM
set_seed(0 )
lowerCAmelCase : int = [torch.randn((4, 3, 3_2, 3_2) ).clip(-1 , 1 ).to(UpperCamelCase_ ) for _ in range(4 )]
lowerCAmelCase : Optional[int] = [torch.randn((4, 3, 3_2, 3_2) ).to(UpperCamelCase_ ) for _ in range(4 )]
lowerCAmelCase : Optional[int] = [torch.randint(0 , 1_0_0_0 , (4,) ).long().to(UpperCamelCase_ ) for _ in range(4 )]
# train with a DDPM scheduler
lowerCAmelCase, lowerCAmelCase : str = self.get_model_optimizer(resolution=3_2 )
model.train().to(UpperCamelCase_ )
for i in range(4 ):
optimizer.zero_grad()
lowerCAmelCase : List[Any] = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
lowerCAmelCase : List[str] = model(UpperCamelCase_ , timesteps[i] ).sample
lowerCAmelCase : Dict = torch.nn.functional.mse_loss(UpperCamelCase_ , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
# recreate the model and optimizer, and retry with DDIM
lowerCAmelCase, lowerCAmelCase : List[Any] = self.get_model_optimizer(resolution=3_2 )
model.train().to(UpperCamelCase_ )
for i in range(4 ):
optimizer.zero_grad()
lowerCAmelCase : Union[str, Any] = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
lowerCAmelCase : Optional[int] = model(UpperCamelCase_ , timesteps[i] ).sample
lowerCAmelCase : int = torch.nn.functional.mse_loss(UpperCamelCase_ , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
self.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-5 ) )
self.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-5 ) )
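# A standalone sketch of the shared forward-noising closed form behind `add_noise`
# in both schedulers above, which is why the two training traces can be compared:
# x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps for a linear beta
# schedule. Shapes and the scalar-timestep signature here are simplifications.
import torch

betas = torch.linspace(0.0001, 0.02, 1000)
alpha_bars = torch.cumprod(1.0 - betas, dim=0)

def add_noise(x0: torch.Tensor, eps: torch.Tensor, t: int) -> torch.Tensor:
    return alpha_bars[t].sqrt() * x0 + (1.0 - alpha_bars[t]).sqrt() * eps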
| 314
| 0
|
"""simple docstring"""
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
snake_case__ : str = TypeVar('''T''')
class snake_case_( Generic[T] ):
__UpperCamelCase = 42 # Cache store of keys
__UpperCamelCase = 42 # References of the keys in cache
__UpperCamelCase = 10 # Maximum capacity of cache
def __init__( self : Union[str, Any] , UpperCamelCase_ : int ):
lowerCAmelCase : List[str] = deque()
lowerCAmelCase : Optional[Any] = set()
if not n:
lowerCAmelCase : Tuple = sys.maxsize
elif n < 0:
raise ValueError('''n should be an integer greater than 0.''' )
else:
lowerCAmelCase : Union[str, Any] = n
def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : T ):
if x not in self.key_reference:
if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
lowerCAmelCase : Tuple = self.dq_store.pop()
self.key_reference.remove(UpperCamelCase_ )
else:
self.dq_store.remove(UpperCamelCase_ )
self.dq_store.appendleft(UpperCamelCase_ )
self.key_reference.add(UpperCamelCase_ )
def lowerCamelCase__ ( self : Any ):
for k in self.dq_store:
print(UpperCamelCase_ )
def __repr__( self : Union[str, Any] ):
return F'''LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}'''
if __name__ == "__main__":
import doctest
doctest.testmod()
snake_case__ : LRUCache[str | int] = LRUCache(4)
lru_cache.refer('''A''')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('''A''')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
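# For contrast with the deque-plus-set bookkeeping above, a compact sketch of the
# same least-recently-used policy built on collections.OrderedDict (a common
# alternative idiom, not part of the original module):
from collections import OrderedDict


class SimpleLRU:
    def __init__(self, capacity: int) -> None:
        self.capacity = capacity
        self.store: OrderedDict[str, None] = OrderedDict()

    def refer(self, key: str) -> None:
        if key in self.store:
            self.store.move_to_end(key)  # key becomes most recently used
        elif len(self.store) >= self.capacity:
            self.store.popitem(last=False)  # evict the least recently used key
        self.store[key] = None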
| 357
|
"""simple docstring"""
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
snake_case__ : List[str] = logging.get_logger(__name__)
class snake_case_( a__ ):
__UpperCamelCase = CLIPConfig
__UpperCamelCase = ['''CLIPEncoderLayer''']
def __init__( self : List[Any] , UpperCamelCase_ : CLIPConfig ):
super().__init__(UpperCamelCase_ )
lowerCAmelCase : str = CLIPVisionModelWithProjection(config.vision_config )
lowerCAmelCase : Any = nn.Linear(config.vision_config.projection_dim , 1 )
lowerCAmelCase : Dict = nn.Linear(config.vision_config.projection_dim , 1 )
@torch.no_grad()
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : int , UpperCamelCase_ : Any , UpperCamelCase_ : Dict=0.5 , UpperCamelCase_ : List[str]=0.5 ):
lowerCAmelCase : List[Any] = self.vision_model(UpperCamelCase_ )[0]
lowerCAmelCase : Tuple = self.p_head(UpperCamelCase_ )
lowerCAmelCase : Any = nsfw_detected.flatten()
lowerCAmelCase : Dict = nsfw_detected > p_threshold
lowerCAmelCase : int = nsfw_detected.tolist()
if any(UpperCamelCase_ ):
logger.warning(
'''Potential NSFW content was detected in one or more images. A black image will be returned instead.'''
''' Try again with a different prompt and/or seed.''' )
for idx, nsfw_detected_ in enumerate(UpperCamelCase_ ):
if nsfw_detected_:
lowerCAmelCase : List[Any] = np.zeros(images[idx].shape )
lowerCAmelCase : Union[str, Any] = self.w_head(UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = watermark_detected.flatten()
lowerCAmelCase : Optional[int] = watermark_detected > w_threshold
lowerCAmelCase : Union[str, Any] = watermark_detected.tolist()
if any(UpperCamelCase_ ):
logger.warning(
'''Potential watermarked content was detected in one or more images. A black image will be returned instead.'''
''' Try again with a different prompt and/or seed.''' )
for idx, watermark_detected_ in enumerate(UpperCamelCase_ ):
if watermark_detected_:
lowerCAmelCase : List[str] = np.zeros(images[idx].shape )
return images, nsfw_detected, watermark_detected
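# A standalone sketch of the per-image thresholding both linear heads above apply
# (the logits and the 0.5 threshold are illustrative values): each head yields one
# logit per image, and an image is flagged when its logit exceeds the threshold.
import torch

logits = torch.tensor([0.2, 0.9, 0.4])
flags = (logits > 0.5).tolist()
assert flags == [False, True, False]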
| 314
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
snake_case__ : List[str] = {
'''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : str = [
'''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTBigCodeForSequenceClassification''',
'''GPTBigCodeForTokenClassification''',
'''GPTBigCodeForCausalLM''',
'''GPTBigCodeModel''',
'''GPTBigCodePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
snake_case__ : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 358
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
snake_case__ : str = logging.get_logger(__name__)
snake_case__ : List[str] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
snake_case__ : str = {
'''vocab_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/vocab.txt''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/vocab.txt''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'''
),
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'''
),
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt''',
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'''
),
'''bert-base-multilingual-cased''': (
'''https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-cased''': (
'''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'''
),
},
}
snake_case__ : Union[str, Any] = {
'''bert-base-uncased''': 512,
'''bert-large-uncased''': 512,
'''bert-base-cased''': 512,
'''bert-large-cased''': 512,
'''bert-base-multilingual-uncased''': 512,
'''bert-base-multilingual-cased''': 512,
'''bert-base-chinese''': 512,
'''bert-base-german-cased''': 512,
'''bert-large-uncased-whole-word-masking''': 512,
'''bert-large-cased-whole-word-masking''': 512,
'''bert-large-uncased-whole-word-masking-finetuned-squad''': 512,
'''bert-large-cased-whole-word-masking-finetuned-squad''': 512,
'''bert-base-cased-finetuned-mrpc''': 512,
'''bert-base-german-dbmdz-cased''': 512,
'''bert-base-german-dbmdz-uncased''': 512,
'''TurkuNLP/bert-base-finnish-cased-v1''': 512,
'''TurkuNLP/bert-base-finnish-uncased-v1''': 512,
'''wietsedv/bert-base-dutch-cased''': 512,
}
snake_case__ : Optional[Any] = {
'''bert-base-uncased''': {'''do_lower_case''': True},
'''bert-large-uncased''': {'''do_lower_case''': True},
'''bert-base-cased''': {'''do_lower_case''': False},
'''bert-large-cased''': {'''do_lower_case''': False},
'''bert-base-multilingual-uncased''': {'''do_lower_case''': True},
'''bert-base-multilingual-cased''': {'''do_lower_case''': False},
'''bert-base-chinese''': {'''do_lower_case''': False},
'''bert-base-german-cased''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': False},
'''bert-base-cased-finetuned-mrpc''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-cased''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-uncased''': {'''do_lower_case''': True},
'''TurkuNLP/bert-base-finnish-cased-v1''': {'''do_lower_case''': False},
'''TurkuNLP/bert-base-finnish-uncased-v1''': {'''do_lower_case''': True},
'''wietsedv/bert-base-dutch-cased''': {'''do_lower_case''': False},
}
class snake_case_( a__ ):
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_INIT_CONFIGURATION
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = BertTokenizer
def __init__( self : int , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : str=True , UpperCamelCase_ : Dict="[UNK]" , UpperCamelCase_ : Any="[SEP]" , UpperCamelCase_ : Any="[PAD]" , UpperCamelCase_ : Tuple="[CLS]" , UpperCamelCase_ : List[Any]="[MASK]" , UpperCamelCase_ : Optional[Any]=True , UpperCamelCase_ : Tuple=None , **UpperCamelCase_ : Optional[int] , ):
super().__init__(
UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , do_lower_case=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , tokenize_chinese_chars=UpperCamelCase_ , strip_accents=UpperCamelCase_ , **UpperCamelCase_ , )
lowerCAmelCase : Any = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , UpperCamelCase_ ) != do_lower_case
or normalizer_state.get('''strip_accents''' , UpperCamelCase_ ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , UpperCamelCase_ ) != tokenize_chinese_chars
):
lowerCAmelCase : Optional[int] = getattr(UpperCamelCase_ , normalizer_state.pop('''type''' ) )
lowerCAmelCase : Tuple = do_lower_case
lowerCAmelCase : Union[str, Any] = strip_accents
lowerCAmelCase : Tuple = tokenize_chinese_chars
lowerCAmelCase : str = normalizer_class(**UpperCamelCase_ )
lowerCAmelCase : Optional[int] = do_lower_case
def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple=None ):
lowerCAmelCase : List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ):
lowerCAmelCase : Optional[Any] = [self.sep_token_id]
lowerCAmelCase : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ):
lowerCAmelCase : str = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_ )
return tuple(UpperCamelCase_ )
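# A worked illustration of the segment-id layout the pair method above produces
# (101/102 are BERT's usual [CLS]/[SEP] ids; the other ids are made up):
# [CLS] A1 A2 [SEP] B1 [SEP] gets zeros over the first segment, ones over the second.
cls_ids, sep_ids = [101], [102]
token_ids_a, token_ids_b = [7, 8], [9]
type_ids = len(cls_ids + token_ids_a + sep_ids) * [0] + len(token_ids_b + sep_ids) * [1]
assert type_ids == [0, 0, 0, 0, 1, 1]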
| 314
| 0
|
"""simple docstring"""
import numpy as np
from PIL import Image
def _snake_case ( _snake_case : np.ndarray , _snake_case : int , _snake_case : int ):
lowerCAmelCase : Dict = np.array(_snake_case )
if arr.shape[0] != arr.shape[1]:
raise ValueError('''The input array is not a square matrix''' )
lowerCAmelCase : int = 0
lowerCAmelCase : Dict = 0
lowerCAmelCase : str = 0
lowerCAmelCase : Union[str, Any] = 0
# compute the shape of the output matrix
lowerCAmelCase : Tuple = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape maxpool_shape
lowerCAmelCase : Dict = np.zeros((maxpool_shape, maxpool_shape) )
while i < arr.shape[0]:
if i + size > arr.shape[0]:
# if the end of the matrix is reached, break
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the maximum of the pooling matrix
lowerCAmelCase : List[Any] = np.max(arr[i : i + size, j : j + size] )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
lowerCAmelCase : int = 0
lowerCAmelCase : Tuple = 0
return updated_arr
def _snake_case ( _snake_case : np.ndarray , _snake_case : int , _snake_case : int ):
lowerCAmelCase : Union[str, Any] = np.array(_snake_case )
if arr.shape[0] != arr.shape[1]:
raise ValueError('''The input array is not a square matrix''' )
lowerCAmelCase : Optional[Any] = 0
lowerCAmelCase : Any = 0
lowerCAmelCase : int = 0
lowerCAmelCase : int = 0
# compute the shape of the output matrix
lowerCAmelCase : str = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape avgpool_shape
lowerCAmelCase : Dict = np.zeros((avgpool_shape, avgpool_shape) )
while i < arr.shape[0]:
# if the end of the matrix is reached, break
if i + size > arr.shape[0]:
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the average of the pooling matrix
lowerCAmelCase : Optional[int] = int(np.average(arr[i : i + size, j : j + size] ) )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
lowerCAmelCase : str = 0
lowerCAmelCase : List[Any] = 0
return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='''avgpooling''', verbose=True)
# Loading the image
snake_case__ : Optional[Any] = Image.open('''path_to_image''')
# Converting the image to a numpy array and max pooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to a numpy array and average pooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
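# A quick standalone check of the output-size arithmetic both pooling functions
# above rely on, (n - size) // stride + 1, on a 4x4 input (values are arbitrary):
import numpy as np

arr = np.arange(16).reshape(4, 4)
size, stride = 2, 2
assert (arr.shape[0] - size) // stride + 1 == 2  # the pooled output is 2x2
assert np.max(arr[0:2, 0:2]) == 5  # top-left window [[0, 1], [4, 5]]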
| 359
|
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class snake_case_( a__ ):
__UpperCamelCase = (DDPMScheduler,)
def lowerCamelCase__ ( self : List[Any] , **UpperCamelCase_ : Union[str, Any] ):
lowerCAmelCase : Optional[Any] = {
'''num_train_timesteps''': 1_0_0_0,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**UpperCamelCase_ )
return config
def lowerCamelCase__ ( self : Optional[int] ):
for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=UpperCamelCase_ )
def lowerCamelCase__ ( self : Tuple ):
for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=UpperCamelCase_ , beta_end=UpperCamelCase_ )
def lowerCamelCase__ ( self : str ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=UpperCamelCase_ )
def lowerCamelCase__ ( self : Optional[Any] ):
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=UpperCamelCase_ )
def lowerCamelCase__ ( self : Optional[int] ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=UpperCamelCase_ )
def lowerCamelCase__ ( self : Any ):
self.check_over_configs(thresholding=UpperCamelCase_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=UpperCamelCase_ , prediction_type=UpperCamelCase_ , sample_max_value=UpperCamelCase_ , )
def lowerCamelCase__ ( self : Tuple ):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCamelCase_ )
def lowerCamelCase__ ( self : str ):
for t in [0, 5_0_0, 9_9_9]:
self.check_over_forward(time_step=UpperCamelCase_ )
def lowerCamelCase__ ( self : int ):
lowerCAmelCase : str = self.scheduler_classes[0]
lowerCAmelCase : Dict = self.get_scheduler_config()
lowerCAmelCase : Dict = scheduler_class(**UpperCamelCase_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.00_979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.02 ) ) < 1E-5
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase : List[Any] = self.scheduler_classes[0]
lowerCAmelCase : List[Any] = self.get_scheduler_config()
lowerCAmelCase : List[str] = scheduler_class(**UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = len(UpperCamelCase_ )
lowerCAmelCase : List[str] = self.dummy_model()
lowerCAmelCase : Union[str, Any] = self.dummy_sample_deter
lowerCAmelCase : List[Any] = torch.manual_seed(0 )
for t in reversed(range(UpperCamelCase_ ) ):
# 1. predict noise residual
lowerCAmelCase : Optional[int] = model(UpperCamelCase_ , UpperCamelCase_ )
# 2. predict previous mean of sample x_t-1
lowerCAmelCase : Optional[Any] = scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCAmelCase : Union[str, Any] = pred_prev_sample
lowerCAmelCase : str = torch.sum(torch.abs(UpperCamelCase_ ) )
lowerCAmelCase : int = torch.mean(torch.abs(UpperCamelCase_ ) )
assert abs(result_sum.item() - 258.9_606 ) < 1E-2
assert abs(result_mean.item() - 0.3_372 ) < 1E-3
def lowerCamelCase__ ( self : Any ):
lowerCAmelCase : Optional[int] = self.scheduler_classes[0]
lowerCAmelCase : Any = self.get_scheduler_config(prediction_type='''v_prediction''' )
lowerCAmelCase : Tuple = scheduler_class(**UpperCamelCase_ )
lowerCAmelCase : Dict = len(UpperCamelCase_ )
lowerCAmelCase : Any = self.dummy_model()
lowerCAmelCase : Any = self.dummy_sample_deter
lowerCAmelCase : List[Any] = torch.manual_seed(0 )
for t in reversed(range(UpperCamelCase_ ) ):
# 1. predict noise residual
lowerCAmelCase : str = model(UpperCamelCase_ , UpperCamelCase_ )
# 2. predict previous mean of sample x_t-1
lowerCAmelCase : List[Any] = scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCAmelCase : List[Any] = pred_prev_sample
lowerCAmelCase : List[str] = torch.sum(torch.abs(UpperCamelCase_ ) )
lowerCAmelCase : Dict = torch.mean(torch.abs(UpperCamelCase_ ) )
assert abs(result_sum.item() - 202.0_296 ) < 1E-2
assert abs(result_mean.item() - 0.2_631 ) < 1E-3
def lowerCamelCase__ ( self : Any ):
lowerCAmelCase : Dict = self.scheduler_classes[0]
lowerCAmelCase : Tuple = self.get_scheduler_config()
lowerCAmelCase : int = scheduler_class(**UpperCamelCase_ )
lowerCAmelCase : List[Any] = [1_0_0, 8_7, 5_0, 1, 0]
scheduler.set_timesteps(timesteps=UpperCamelCase_ )
lowerCAmelCase : Dict = scheduler.timesteps
for i, timestep in enumerate(UpperCamelCase_ ):
if i == len(UpperCamelCase_ ) - 1:
lowerCAmelCase : List[Any] = -1
else:
lowerCAmelCase : Union[str, Any] = timesteps[i + 1]
lowerCAmelCase : Any = scheduler.previous_timestep(UpperCamelCase_ )
lowerCAmelCase : Dict = prev_t.item()
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : Union[str, Any] = self.scheduler_classes[0]
lowerCAmelCase : List[Any] = self.get_scheduler_config()
lowerCAmelCase : Tuple = scheduler_class(**UpperCamelCase_ )
lowerCAmelCase : int = [1_0_0, 8_7, 5_0, 5_1, 0]
with self.assertRaises(UpperCamelCase_ , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=UpperCamelCase_ )
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase : Any = self.scheduler_classes[0]
lowerCAmelCase : Optional[int] = self.get_scheduler_config()
lowerCAmelCase : str = scheduler_class(**UpperCamelCase_ )
lowerCAmelCase : List[str] = [1_0_0, 8_7, 5_0, 1, 0]
lowerCAmelCase : int = len(UpperCamelCase_ )
with self.assertRaises(UpperCamelCase_ , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=UpperCamelCase_ , timesteps=UpperCamelCase_ )
def lowerCamelCase__ ( self : Optional[Any] ):
lowerCAmelCase : List[Any] = self.scheduler_classes[0]
lowerCAmelCase : Tuple = self.get_scheduler_config()
lowerCAmelCase : Dict = scheduler_class(**UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
UpperCamelCase_ , msg=f'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
scheduler.set_timesteps(timesteps=UpperCamelCase_ )
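# A standalone sketch of the "fixed_small" posterior variance checked above:
# beta_tilde_t = (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t) * beta_t, which is 0 at
# t=0 and approaches beta_t for large t, matching the 0.0 and 0.02 assertions.
# The scalar-timestep signature is a simplification of the library version.
import torch

betas = torch.linspace(0.0001, 0.02, 1000)
alpha_bars = torch.cumprod(1.0 - betas, dim=0)

def fixed_small_variance(t: int) -> torch.Tensor:
    prev = alpha_bars[t - 1] if t > 0 else torch.tensor(1.0)
    return (1.0 - prev) / (1.0 - alpha_bars[t]) * betas[t]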
| 314
| 0
|
"""simple docstring"""
def _snake_case ( _snake_case : str , _snake_case : str ):
lowerCAmelCase : Optional[int] = len(_snake_case )
lowerCAmelCase : List[Any] = []
for i in range(len(_snake_case ) - pat_len + 1 ):
lowerCAmelCase : Union[str, Any] = True
for j in range(_snake_case ):
if s[i + j] != pattern[j]:
lowerCAmelCase : str = False
break
if match_found:
position.append(_snake_case )
return position
if __name__ == "__main__":
assert naive_pattern_search('''ABCDEFG''', '''DE''') == [3]
print(naive_pattern_search('''ABAAABCDBBABCDDEBCABC''', '''ABC'''))
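# A worked trace of the quadratic scan above (using the function name from the
# __main__ block): every alignment i is compared character by character, which is
# O(len(s) * len(pattern)) overall, and the matches in the sample string start at
# indices 4, 10 and 18.
assert naive_pattern_search('''ABAAABCDBBABCDDEBCABC''', '''ABC''') == [4, 10, 18]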
| 360
|
"""simple docstring"""
def _snake_case ( _snake_case : int = 50000000 ):
lowerCAmelCase : List[str] = set()
lowerCAmelCase : List[Any] = int((limit - 24) ** (1 / 2) )
lowerCAmelCase : Optional[int] = set(range(3 , prime_square_limit + 1 , 2 ) )
primes.add(2 )
for p in range(3 , prime_square_limit + 1 , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , prime_square_limit + 1 , _snake_case ) ) )
for primea in primes:
lowerCAmelCase : Optional[Any] = primea * primea
for primea in primes:
lowerCAmelCase : List[Any] = primea * primea * primea
if square + cube >= limit - 16:
break
for primea in primes:
lowerCAmelCase : Tuple = primea * primea * primea * primea
lowerCAmelCase : Tuple = square + cube + tetr
if total >= limit:
break
ret.add(_snake_case )
return len(_snake_case )
if __name__ == "__main__":
print(f"""{solution() = }""")
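# The smallest number expressible as a prime square plus a prime cube plus a prime
# fourth power, as a standalone check of what the sieve above counts:
# 28 = 2**2 + 2**3 + 2**4.
assert 2**2 + 2**3 + 2**4 == 28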
| 314
| 0
|
"""simple docstring"""
import torch
from diffusers import DiffusionPipeline
class snake_case_( a__ ):
def __init__( self : Any , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[Any] ):
super().__init__()
self.register_modules(unet=UpperCamelCase_ , scheduler=UpperCamelCase_ )
def __call__( self : List[Any] ):
lowerCAmelCase : Union[str, Any] = torch.randn(
(1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , )
lowerCAmelCase : List[Any] = 1
lowerCAmelCase : Optional[Any] = self.unet(UpperCamelCase_ , UpperCamelCase_ ).sample
lowerCAmelCase : Optional[int] = self.scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ).prev_sample
lowerCAmelCase : List[str] = scheduler_output - scheduler_output + torch.ones_like(UpperCamelCase_ )
return result
| 361
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
snake_case__ : Tuple = {
'''configuration_maskformer''': ['''MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MaskFormerConfig'''],
'''configuration_maskformer_swin''': ['''MaskFormerSwinConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : List[Any] = ['''MaskFormerFeatureExtractor''']
snake_case__ : List[Any] = ['''MaskFormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : Dict = [
'''MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MaskFormerForInstanceSegmentation''',
'''MaskFormerModel''',
'''MaskFormerPreTrainedModel''',
]
snake_case__ : Optional[Any] = [
'''MaskFormerSwinBackbone''',
'''MaskFormerSwinModel''',
'''MaskFormerSwinPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
snake_case__ : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 314
| 0
|
from __future__ import annotations
import math
import random
from typing import Any
class snake_case_:
def __init__( self : Union[str, Any] ):
lowerCAmelCase : list[Any] = []
lowerCAmelCase : int = 0
lowerCAmelCase : int = 0
def lowerCamelCase__ ( self : Dict ):
return self.head == self.tail
def lowerCamelCase__ ( self : Tuple , UpperCamelCase_ : Any ):
self.data.append(UpperCamelCase_ )
lowerCAmelCase : Optional[int] = self.tail + 1
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : Tuple = self.data[self.head]
lowerCAmelCase : Optional[int] = self.head + 1
return ret
def lowerCamelCase__ ( self : Tuple ):
return self.tail - self.head
def lowerCamelCase__ ( self : Any ):
print(self.data )
print('''**************''' )
print(self.data[self.head : self.tail] )
class snake_case_:
def __init__( self : List[Any] , UpperCamelCase_ : Any ):
lowerCAmelCase : List[str] = data
lowerCAmelCase : MyNode | None = None
lowerCAmelCase : MyNode | None = None
lowerCAmelCase : int = 1
def lowerCamelCase__ ( self : int ):
return self.data
def lowerCamelCase__ ( self : Optional[Any] ):
return self.left
def lowerCamelCase__ ( self : List[str] ):
return self.right
def lowerCamelCase__ ( self : Optional[int] ):
return self.height
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Any ):
lowerCAmelCase : Dict = data
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : MyNode | None ):
lowerCAmelCase : Optional[Any] = node
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : MyNode | None ):
lowerCAmelCase : str = node
def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : int ):
lowerCAmelCase : Tuple = height
def _snake_case ( _snake_case : MyNode | None ):
if node is None:
return 0
return node.get_height()
def _snake_case ( _snake_case : int , _snake_case : int ):
if a > b:
return a
return b
def _snake_case ( _snake_case : MyNode ):
print('''left rotation node:''' , node.get_data() )
lowerCAmelCase : Any = node.get_left()
assert ret is not None
node.set_left(ret.get_right() )
ret.set_right(_snake_case )
lowerCAmelCase : Optional[int] = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(_snake_case )
lowerCAmelCase : Optional[int] = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(_snake_case )
return ret
def _snake_case ( _snake_case : MyNode ):
print('''right rotation node:''' , node.get_data() )
lowerCAmelCase : List[Any] = node.get_right()
assert ret is not None
node.set_right(ret.get_left() )
ret.set_left(_snake_case )
lowerCAmelCase : str = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(_snake_case )
lowerCAmelCase : List[str] = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(_snake_case )
return ret
def _snake_case ( _snake_case : MyNode ):
lowerCAmelCase : List[str] = node.get_left()
assert left_child is not None
node.set_left(left_rotation(_snake_case ) )
return right_rotation(_snake_case )
def _snake_case ( _snake_case : MyNode ):
lowerCAmelCase : List[str] = node.get_right()
assert right_child is not None
node.set_right(right_rotation(_snake_case ) )
return left_rotation(_snake_case )
def _snake_case ( _snake_case : MyNode | None , _snake_case : Any ):
if node is None:
return MyNode(_snake_case )
if data < node.get_data():
node.set_left(insert_node(node.get_left() , _snake_case ) )
if (
get_height(node.get_left() ) - get_height(node.get_right() ) == 2
): # an unbalance detected
lowerCAmelCase : Dict = node.get_left()
assert left_child is not None
if (
data < left_child.get_data()
): # new node is the left child of the left child
lowerCAmelCase : Optional[Any] = right_rotation(_snake_case )
else:
lowerCAmelCase : Tuple = lr_rotation(_snake_case )
else:
node.set_right(insert_node(node.get_right() , _snake_case ) )
if get_height(node.get_right() ) - get_height(node.get_left() ) == 2:
lowerCAmelCase : str = node.get_right()
assert right_child is not None
if data < right_child.get_data():
lowerCAmelCase : Any = rl_rotation(_snake_case )
else:
lowerCAmelCase : Tuple = left_rotation(_snake_case )
lowerCAmelCase : Dict = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(_snake_case )
return node
def _snake_case ( _snake_case : MyNode ):
while True:
lowerCAmelCase : str = root.get_right()
if right_child is None:
break
lowerCAmelCase : List[str] = right_child
return root.get_data()
def _snake_case ( _snake_case : MyNode ):
while True:
lowerCAmelCase : Dict = root.get_left()
if left_child is None:
break
lowerCAmelCase : List[Any] = left_child
return root.get_data()
def _snake_case ( _snake_case : MyNode , _snake_case : Any ):
lowerCAmelCase : Any = root.get_left()
lowerCAmelCase : int = root.get_right()
if root.get_data() == data:
if left_child is not None and right_child is not None:
lowerCAmelCase : List[Any] = get_left_most(_snake_case )
root.set_data(_snake_case )
root.set_right(del_node(_snake_case , _snake_case ) )
elif left_child is not None:
lowerCAmelCase : Dict = left_child
elif right_child is not None:
lowerCAmelCase : Tuple = right_child
else:
return None
elif root.get_data() > data:
if left_child is None:
print('''No such data''' )
return root
else:
root.set_left(del_node(_snake_case , _snake_case ) )
else: # root.get_data() < data
if right_child is None:
return root
else:
root.set_right(del_node(_snake_case , _snake_case ) )
if get_height(_snake_case ) - get_height(_snake_case ) == 2:
assert right_child is not None
if get_height(right_child.get_right() ) > get_height(right_child.get_left() ):
lowerCAmelCase : Any = left_rotation(_snake_case )
else:
lowerCAmelCase : List[str] = rl_rotation(_snake_case )
elif get_height(_snake_case ) - get_height(_snake_case ) == -2:
assert left_child is not None
if get_height(left_child.get_left() ) > get_height(left_child.get_right() ):
lowerCAmelCase : List[str] = right_rotation(_snake_case )
else:
lowerCAmelCase : Optional[int] = lr_rotation(_snake_case )
lowerCAmelCase : Union[str, Any] = my_max(get_height(root.get_right() ) , get_height(root.get_left() ) ) + 1
root.set_height(_snake_case )
return root
class snake_case_:
def __init__( self : Union[str, Any] ):
lowerCAmelCase : MyNode | None = None
def lowerCamelCase__ ( self : int ):
return get_height(self.root )
def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : Any ):
print('''insert:''' + str(UpperCamelCase_ ) )
lowerCAmelCase : str = insert_node(self.root , UpperCamelCase_ )
def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : Any ):
print('''delete:''' + str(UpperCamelCase_ ) )
if self.root is None:
print('''Tree is empty!''' )
return
lowerCAmelCase : int = del_node(self.root , UpperCamelCase_ )
def __str__( self : List[str] , ): # a level traversal; gives a more intuitive look at the tree
lowerCAmelCase : List[str] = ''''''
lowerCAmelCase : int = MyQueue()
q.push(self.root )
lowerCAmelCase : Any = self.get_height()
if layer == 0:
return output
lowerCAmelCase : Union[str, Any] = 0
while not q.is_empty():
lowerCAmelCase : int = q.pop()
lowerCAmelCase : int = ''' ''' * int(math.pow(2 , layer - 1 ) )
output += space
if node is None:
output += "*"
q.push(UpperCamelCase_ )
q.push(UpperCamelCase_ )
else:
output += str(node.get_data() )
q.push(node.get_left() )
q.push(node.get_right() )
output += space
lowerCAmelCase : Dict = cnt + 1
for i in range(1_0_0 ):
if cnt == math.pow(2 , UpperCamelCase_ ) - 1:
lowerCAmelCase : Union[str, Any] = layer - 1
if layer == 0:
output += "\n*************************************"
return output
output += "\n"
break
output += "\n*************************************"
return output
def _snake_case ( ):
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
snake_case__ : int = AVLtree()
snake_case__ : Optional[int] = list(range(10))
random.shuffle(lst)
for i in lst:
t.insert(i)
print(str(t))
random.shuffle(lst)
for i in lst:
t.del_node(i)
print(str(t))
"""simple docstring"""
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class snake_case_:
def __init__( self : Dict , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : int=sys.maxsize ):
lowerCAmelCase : Tuple = '''bilinear'''
lowerCAmelCase : List[Any] = max_size
lowerCAmelCase : Optional[int] = short_edge_length
def __call__( self : Optional[int] , UpperCamelCase_ : Optional[int] ):
lowerCAmelCase : Tuple = []
for img in imgs:
lowerCAmelCase, lowerCAmelCase : List[str] = img.shape[:2]
# later: provide list and randomly choose index for resize
lowerCAmelCase : int = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
if size == 0:
return img
lowerCAmelCase : Optional[Any] = size * 1.0 / min(UpperCamelCase_ , UpperCamelCase_ )
if h < w:
lowerCAmelCase, lowerCAmelCase : List[str] = size, scale * w
else:
lowerCAmelCase, lowerCAmelCase : int = scale * h, size
if max(UpperCamelCase_ , UpperCamelCase_ ) > self.max_size:
lowerCAmelCase : Union[str, Any] = self.max_size * 1.0 / max(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : Tuple = newh * scale
lowerCAmelCase : str = neww * scale
lowerCAmelCase : Union[str, Any] = int(neww + 0.5 )
lowerCAmelCase : str = int(newh + 0.5 )
if img.dtype == np.uint8:
lowerCAmelCase : Tuple = Image.fromarray(UpperCamelCase_ )
lowerCAmelCase : Optional[int] = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
lowerCAmelCase : Union[str, Any] = np.asarray(UpperCamelCase_ )
else:
lowerCAmelCase : List[str] = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw
lowerCAmelCase : Optional[int] = nn.functional.interpolate(
UpperCamelCase_ , (newh, neww) , mode=self.interp_method , align_corners=UpperCamelCase_ ).squeeze(0 )
img_augs.append(UpperCamelCase_ )
return img_augs
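# Worked sketch of the scaling above: for a 480x640 (h, w) image with a sampled
# short edge of 600, scale = 600 / 480 = 1.25, so (newh, neww) = (600, 800);
# the max_size clamp only kicks in when the long edge exceeds it.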
class snake_case_:
def __init__( self : Tuple , UpperCamelCase_ : Any ):
lowerCAmelCase : Any = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
lowerCAmelCase : List[Any] = cfg.INPUT.FORMAT
lowerCAmelCase : Tuple = cfg.SIZE_DIVISIBILITY
lowerCAmelCase : int = cfg.PAD_VALUE
lowerCAmelCase : Union[str, Any] = cfg.INPUT.MAX_SIZE_TEST
lowerCAmelCase : Union[str, Any] = cfg.MODEL.DEVICE
lowerCAmelCase : Union[str, Any] = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
lowerCAmelCase : List[Any] = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
lowerCAmelCase : Optional[int] = lambda x : (x - self.pixel_mean) / self.pixel_std
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : List[Any] ):
lowerCAmelCase : Dict = tuple(max(UpperCamelCase_ ) for s in zip(*[img.shape for img in images] ) )
lowerCAmelCase : Dict = [im.shape[-2:] for im in images]
lowerCAmelCase : Dict = [
nn.functional.pad(
UpperCamelCase_ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(UpperCamelCase_ , UpperCamelCase_ )
]
return torch.stack(UpperCamelCase_ ), torch.tensor(UpperCamelCase_ )
def __call__( self : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[int]=False ):
with torch.no_grad():
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
lowerCAmelCase : List[Any] = [images]
if single_image:
assert len(UpperCamelCase_ ) == 1
for i in range(len(UpperCamelCase_ ) ):
if isinstance(images[i] , torch.Tensor ):
images.insert(UpperCamelCase_ , images.pop(UpperCamelCase_ ).to(self.device ).float() )
elif not isinstance(images[i] , torch.Tensor ):
images.insert(
UpperCamelCase_ , torch.as_tensor(img_tensorize(images.pop(UpperCamelCase_ ) , input_format=self.input_format ) )
.to(self.device )
.float() , )
# resize smallest edge
lowerCAmelCase : Dict = torch.tensor([im.shape[:2] for im in images] )
lowerCAmelCase : str = self.aug(UpperCamelCase_ )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
lowerCAmelCase : int = [self.normalizer(UpperCamelCase_ ) for x in images]
# now pad them to do the following operations
lowerCAmelCase, lowerCAmelCase : Optional[Any] = self.pad(UpperCamelCase_ )
# enforce size divisibility (not implemented here)
if self.size_divisibility > 0:
raise NotImplementedError()
# compute per-image (y, x) rescale factors
lowerCAmelCase : Union[str, Any] = torch.true_divide(UpperCamelCase_ , UpperCamelCase_ )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def _snake_case ( _snake_case : str , _snake_case : List[Any] ):
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
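# Hedged sketch of the rescaling above on one xyxy box (names here are
# illustrative; scale_yx holds (scale_y, scale_x) per image):
_demo_boxes = torch.tensor([[10.0, 20.0, 30.0, 40.0]])
_demo_scale_yx = torch.tensor([[2.0, 0.5]])  # y doubles, x halves
_demo_boxes[:, 0::2] *= _demo_scale_yx[:, 1]
_demo_boxes[:, 1::2] *= _demo_scale_yx[:, 0]
assert _demo_boxes.tolist() == [[5.0, 40.0, 15.0, 80.0]]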
def _snake_case ( _snake_case : Any , _snake_case : Tuple[int, int] ):
assert torch.isfinite(_snake_case ).all(), "Box tensor contains infinite or NaN!"
lowerCAmelCase, lowerCAmelCase : Optional[int] = box_size
tensor[:, 0].clamp_(min=0 , max=_snake_case )
tensor[:, 1].clamp_(min=0 , max=_snake_case )
tensor[:, 2].clamp_(min=0 , max=_snake_case )
tensor[:, 3].clamp_(min=0 , max=_snake_case )
"""simple docstring"""
def _snake_case ( _snake_case : str ):
lowerCAmelCase : Union[str, Any] = hex_num.strip()
if not hex_num:
raise ValueError('''No value was passed to the function''' )
lowerCAmelCase : Optional[Any] = hex_num[0] == '''-'''
if is_negative:
lowerCAmelCase : List[str] = hex_num[1:]
try:
lowerCAmelCase : Optional[int] = int(_snake_case , 16 )
except ValueError:
raise ValueError('''Invalid value was passed to the function''' )
lowerCAmelCase : str = ''''''
while int_num > 0:
lowerCAmelCase : str = str(int_num % 2 ) + bin_str
int_num >>= 1
return int(('''-''' + bin_str) if is_negative else bin_str )
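# Quick check of the idea above (standard library only): 0xAC is 172, whose
# remainders mod 2, read from the low bit up, assemble to "10101100".
assert bin(int("AC", 16))[2:] == "10101100"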
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _snake_case ( _snake_case : Dict ):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like all of the other languages.
if (
(cp >= 0X4e00 and cp <= 0X9fff)
or (cp >= 0X3400 and cp <= 0X4dbf) #
or (cp >= 0X2_0000 and cp <= 0X2_a6df) #
or (cp >= 0X2_a700 and cp <= 0X2_b73f) #
or (cp >= 0X2_b740 and cp <= 0X2_b81f) #
or (cp >= 0X2_b820 and cp <= 0X2_ceaf) #
or (cp >= 0Xf900 and cp <= 0Xfaff)
or (cp >= 0X2_f800 and cp <= 0X2_fa1f) #
): #
return True
return False
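# Hedged sketch: the same range test restated for just the two most common CJK
# blocks (the helper name below is illustrative, not from this file):
def _is_basic_cjk(cp: int) -> bool:
    return 0x4E00 <= cp <= 0x9FFF or 0x3400 <= cp <= 0x4DBF

assert _is_basic_cjk(ord("中")) and not _is_basic_cjk(ord("a"))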
def _snake_case ( _snake_case : str ):
# word like '180' or '身高' or '神'
for char in word:
lowerCAmelCase : str = ord(_snake_case )
if not _is_chinese_char(_snake_case ):
return 0
return 1
def _snake_case ( _snake_case : List[str] ):
lowerCAmelCase : List[Any] = set()
for token in tokens:
lowerCAmelCase : Union[str, Any] = len(_snake_case ) > 1 and is_chinese(_snake_case )
if chinese_word:
word_set.add(_snake_case )
lowerCAmelCase : List[str] = list(_snake_case )
return word_list
def _snake_case ( _snake_case : List[str] , _snake_case : set() ):
if not chinese_word_set:
return bert_tokens
lowerCAmelCase : List[Any] = max([len(_snake_case ) for w in chinese_word_set] )
lowerCAmelCase : Optional[Any] = bert_tokens
lowerCAmelCase, lowerCAmelCase : Any = 0, len(_snake_case )
while start < end:
lowerCAmelCase : str = True
if is_chinese(bert_word[start] ):
lowerCAmelCase : List[Any] = min(end - start , _snake_case )
for i in range(_snake_case , 1 , -1 ):
lowerCAmelCase : str = ''''''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
lowerCAmelCase : Optional[Any] = '''##''' + bert_word[j]
lowerCAmelCase : Union[str, Any] = start + i
lowerCAmelCase : Optional[Any] = False
break
if single_word:
start += 1
return bert_word
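# Worked sketch of the ##-merging above: with BERT tokens ['北', '京', '人'] and
# the LTP word set {'北京'}, the scan matches '北京' at position 0, rewrites the
# second token, and yields ['北', '##京', '人'] for whole-word masking.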
def _snake_case ( _snake_case : List[str] , _snake_case : LTP , _snake_case : BertTokenizer ):
lowerCAmelCase : Optional[int] = []
for i in range(0 , len(_snake_case ) , 100 ):
lowerCAmelCase : Optional[int] = ltp_tokenizer.seg(lines[i : i + 100] )[0]
lowerCAmelCase : Union[str, Any] = [get_chinese_word(_snake_case ) for r in res]
ltp_res.extend(_snake_case )
assert len(_snake_case ) == len(_snake_case )
lowerCAmelCase : int = []
for i in range(0 , len(_snake_case ) , 100 ):
lowerCAmelCase : Optional[Any] = bert_tokenizer(lines[i : i + 100] , add_special_tokens=_snake_case , truncation=_snake_case , max_length=512 )
bert_res.extend(res['''input_ids'''] )
assert len(_snake_case ) == len(_snake_case )
lowerCAmelCase : Union[str, Any] = []
for input_ids, chinese_word in zip(_snake_case , _snake_case ):
lowerCAmelCase : Optional[int] = []
for id in input_ids:
lowerCAmelCase : Union[str, Any] = bert_tokenizer._convert_id_to_token(_snake_case )
input_tokens.append(_snake_case )
lowerCAmelCase : Any = add_sub_symbol(_snake_case , _snake_case )
lowerCAmelCase : Union[str, Any] = []
# We only save the positions of Chinese subwords that start with ##, i.e. those that are part of a whole word.
for i, token in enumerate(_snake_case ):
if token[:2] == "##":
lowerCAmelCase : Any = token[2:]
# save chinese tokens' pos
if len(_snake_case ) == 1 and _is_chinese_char(ord(_snake_case ) ):
ref_id.append(_snake_case )
ref_ids.append(_snake_case )
assert len(_snake_case ) == len(_snake_case )
return ref_ids
def _snake_case ( _snake_case : Dict ):
# For Chinese (Ro)BERT(a) models, the best results come from RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm).
# To fine-tune these models, we have to use the same tokenizer: LTP (https://github.com/HIT-SCIR/ltp).
with open(args.file_name , '''r''' , encoding='''utf-8''' ) as f:
lowerCAmelCase : List[str] = f.readlines()
lowerCAmelCase : Union[str, Any] = [line.strip() for line in data if len(_snake_case ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
lowerCAmelCase : List[str] = LTP(args.ltp ) # faster in GPU device
lowerCAmelCase : Any = BertTokenizer.from_pretrained(args.bert )
lowerCAmelCase : int = prepare_ref(_snake_case , _snake_case , _snake_case )
with open(args.save_path , '''w''' , encoding='''utf-8''' ) as f:
lowerCAmelCase : List[Any] = [json.dumps(_snake_case ) + '''\n''' for ref in ref_ids]
f.writelines(_snake_case )
if __name__ == "__main__":
snake_case__ : Optional[int] = argparse.ArgumentParser(description='''prepare_chinese_ref''')
parser.add_argument(
'''--file_name''',
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''', type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path'''
)
parser.add_argument('''--bert''', type=str, default='''./resources/robert''', help='''resources for Bert tokenizer''')
parser.add_argument('''--save_path''', type=str, default='''./resources/ref.txt''', help='''path to save res''')
snake_case__ : int = parser.parse_args()
main(args)
"""simple docstring"""
snake_case__ : int = {
'''a''': '''AAAAA''',
'''b''': '''AAAAB''',
'''c''': '''AAABA''',
'''d''': '''AAABB''',
'''e''': '''AABAA''',
'''f''': '''AABAB''',
'''g''': '''AABBA''',
'''h''': '''AABBB''',
'''i''': '''ABAAA''',
'''j''': '''BBBAA''',
'''k''': '''ABAAB''',
'''l''': '''ABABA''',
'''m''': '''ABABB''',
'''n''': '''ABBAA''',
'''o''': '''ABBAB''',
'''p''': '''ABBBA''',
'''q''': '''ABBBB''',
'''r''': '''BAAAA''',
'''s''': '''BAAAB''',
'''t''': '''BAABA''',
'''u''': '''BAABB''',
'''v''': '''BBBAB''',
'''w''': '''BABAA''',
'''x''': '''BABAB''',
'''y''': '''BABBA''',
'''z''': '''BABBB''',
''' ''': ''' ''',
}
snake_case__ : Union[str, Any] = {value: key for key, value in encode_dict.items()}
def _snake_case ( _snake_case : str ):
lowerCAmelCase : Tuple = ''''''
for letter in word.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception('''encode() accepts only letters of the alphabet and spaces''' )
return encoded
def _snake_case ( _snake_case : str ):
if set(_snake_case ) - {"A", "B", " "} != set():
raise Exception('''decode() accepts only \'A\', \'B\' and spaces''' )
lowerCAmelCase : Union[str, Any] = ''''''
for word in coded.split():
while len(_snake_case ) != 0:
decoded += decode_dict[word[:5]]
lowerCAmelCase : Optional[Any] = word[5:]
decoded += " "
return decoded.strip()
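# Minimal round trip mirroring the dictionaries above (only two letters shown;
# this is a sketch, not the module's own API):
_enc = {"a": "AAAAA", "b": "AAAAB"}
_dec = {v: k for k, v in _enc.items()}
_coded = "".join(_enc[c] for c in "ab")
assert _coded == "AAAAAAAAAB"
assert "".join(_dec[_coded[i : i + 5]] for i in (0, 5)) == "ab"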
if __name__ == "__main__":
from doctest import testmod
testmod()
"""simple docstring"""
import numpy as np
from PIL import Image
def _snake_case ( _snake_case : np.ndarray , _snake_case : int , _snake_case : int ):
lowerCAmelCase : Dict = np.array(_snake_case )
if arr.shape[0] != arr.shape[1]:
raise ValueError('''The input array is not a square matrix''' )
lowerCAmelCase : int = 0
lowerCAmelCase : Dict = 0
lowerCAmelCase : str = 0
lowerCAmelCase : Union[str, Any] = 0
# compute the shape of the output matrix
lowerCAmelCase : Tuple = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape maxpool_shape
lowerCAmelCase : Dict = np.zeros((maxpool_shape, maxpool_shape) )
while i < arr.shape[0]:
if i + size > arr.shape[0]:
# if the end of the matrix is reached, break
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the maximum of the pooling matrix
lowerCAmelCase : List[Any] = np.max(arr[i : i + size, j : j + size] )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
lowerCAmelCase : int = 0
lowerCAmelCase : Tuple = 0
return updated_arr
def _snake_case ( _snake_case : np.ndarray , _snake_case : int , _snake_case : int ):
lowerCAmelCase : Union[str, Any] = np.array(_snake_case )
if arr.shape[0] != arr.shape[1]:
raise ValueError('''The input array is not a square matrix''' )
lowerCAmelCase : Optional[Any] = 0
lowerCAmelCase : Any = 0
lowerCAmelCase : int = 0
lowerCAmelCase : int = 0
# compute the shape of the output matrix
lowerCAmelCase : str = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape avgpool_shape
lowerCAmelCase : Dict = np.zeros((avgpool_shape, avgpool_shape) )
while i < arr.shape[0]:
# if the end of the matrix is reached, break
if i + size > arr.shape[0]:
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the average of the pooling matrix
lowerCAmelCase : Optional[int] = int(np.average(arr[i : i + size, j : j + size] ) )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
lowerCAmelCase : str = 0
lowerCAmelCase : List[Any] = 0
return updated_arr
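# Worked 2x2 / stride-2 max-pool on a 4x4 grid, mirroring the loops above
# (relies on the `np` import at the top of this file):
_demo = np.arange(16).reshape(4, 4)
_expected = np.array([[5, 7], [13, 15]])
assert (_expected == np.array(
    [[_demo[i : i + 2, j : j + 2].max() for j in (0, 2)] for i in (0, 2)]
)).all()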
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='''avgpooling''', verbose=True)
# Loading the image
snake_case__ : Optional[Any] = Image.open('''path_to_image''')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
"""simple docstring"""
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
snake_case__ : List[Any] = logging.get_logger(__name__)
snake_case__ : Optional[int] = {
'''nielsr/canine-s''': 2_048,
}
# Unicode defines 1,114,112 total “codepoints”
snake_case__ : Optional[int] = 1_114_112
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
snake_case__ : int = 0
snake_case__ : str = 0xE_0_0_0
snake_case__ : Optional[int] = 0xE_0_0_1
snake_case__ : List[str] = 0xE_0_0_2
snake_case__ : Any = 0xE_0_0_3
snake_case__ : Dict = 0xE_0_0_4
# Maps special codepoints to human-readable names.
snake_case__ : Dict[int, str] = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
snake_case__ : Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class snake_case_( a__ ):
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Optional[int] , UpperCamelCase_ : List[str]=chr(UpperCamelCase_ ) , UpperCamelCase_ : Tuple=chr(UpperCamelCase_ ) , UpperCamelCase_ : str=chr(UpperCamelCase_ ) , UpperCamelCase_ : Any=chr(UpperCamelCase_ ) , UpperCamelCase_ : Optional[Any]=chr(UpperCamelCase_ ) , UpperCamelCase_ : Dict=chr(UpperCamelCase_ ) , UpperCamelCase_ : List[str]=False , UpperCamelCase_ : Any=2_0_4_8 , **UpperCamelCase_ : Tuple , ):
lowerCAmelCase : List[str] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else bos_token
lowerCAmelCase : Tuple = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else eos_token
lowerCAmelCase : Union[str, Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else sep_token
lowerCAmelCase : Dict = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else cls_token
lowerCAmelCase : Optional[Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowerCAmelCase : str = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token
super().__init__(
bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , model_max_length=UpperCamelCase_ , **UpperCamelCase_ , )
# Creates a mapping for looking up the IDs of special symbols.
lowerCAmelCase : Dict[str, int] = {}
for codepoint, name in SPECIAL_CODEPOINTS.items():
lowerCAmelCase : Union[str, Any] = codepoint
# Creates a mapping for looking up the string forms of special symbol IDs.
lowerCAmelCase : Dict[int, str] = {
codepoint: name for name, codepoint in self._special_codepoints.items()
}
lowerCAmelCase : Dict = UNICODE_VOCAB_SIZE
lowerCAmelCase : Tuple = len(self._special_codepoints )
@property
def lowerCamelCase__ ( self : Dict ):
return self._unicode_vocab_size
def lowerCamelCase__ ( self : Tuple , UpperCamelCase_ : str ):
return list(UpperCamelCase_ )
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : str ):
try:
return ord(UpperCamelCase_ )
except TypeError:
raise ValueError(F'''invalid token: \'{token}\'''' )
def lowerCamelCase__ ( self : str , UpperCamelCase_ : int ):
try:
if index in SPECIAL_CODEPOINTS:
return SPECIAL_CODEPOINTS[index]
return chr(UpperCamelCase_ )
except TypeError:
raise ValueError(F'''invalid id: {index}''' )
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : str ):
return "".join(UpperCamelCase_ )
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ):
lowerCAmelCase : List[str] = [self.sep_token_id]
lowerCAmelCase : Dict = [self.cls_token_id]
lowerCAmelCase : Union[str, Any] = cls + token_ids_a + sep
if token_ids_a is not None:
result += token_ids_a + sep
return result
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None , UpperCamelCase_ : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ )
lowerCAmelCase : Dict = [1] + ([0] * len(UpperCamelCase_ )) + [1]
if token_ids_a is not None:
result += ([0] * len(UpperCamelCase_ )) + [1]
return result
def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ):
lowerCAmelCase : List[str] = [self.sep_token_id]
lowerCAmelCase : Optional[int] = [self.cls_token_id]
lowerCAmelCase : str = len(cls + token_ids_a + sep ) * [0]
if token_ids_a is not None:
result += len(token_ids_a + sep ) * [1]
return result
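    # Worked sketch of the layouts the methods above produce (IDs are Unicode
    # codepoints; CLS/SEP are the private-use codepoints defined earlier):
    #   single sequence: [CLS] A [SEP]          -> token_type_ids all 0
    #   sequence pair:   [CLS] A [SEP] B [SEP]  -> 0 over the A span, 1 over the B span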
def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ):
return ()
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class snake_case_( a__ ):
def __init__( self : Dict , UpperCamelCase_ : Any , UpperCamelCase_ : List[str] ):
super().__init__()
# make sure scheduler can always be converted to DDIM
lowerCAmelCase : str = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=UpperCamelCase_ , scheduler=UpperCamelCase_ )
@torch.no_grad()
def __call__( self : str , UpperCamelCase_ : int = 1 , UpperCamelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_ : float = 0.0 , UpperCamelCase_ : int = 5_0 , UpperCamelCase_ : Optional[bool] = None , UpperCamelCase_ : Optional[str] = "pil" , UpperCamelCase_ : bool = True , ):
# Sample gaussian noise to begin loop
if isinstance(self.unet.config.sample_size , UpperCamelCase_ ):
lowerCAmelCase : Dict = (
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size,
self.unet.config.sample_size,
)
else:
lowerCAmelCase : str = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and len(UpperCamelCase_ ) != batch_size:
raise ValueError(
F'''You have passed a list of generators of length {len(UpperCamelCase_ )}, but requested an effective batch'''
F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
lowerCAmelCase : int = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(UpperCamelCase_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
lowerCAmelCase : Optional[Any] = self.unet(UpperCamelCase_ , UpperCamelCase_ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in the DDIM paper and should be in [0, 1]
# do x_t -> x_t-1
lowerCAmelCase : Dict = self.scheduler.step(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , eta=UpperCamelCase_ , use_clipped_model_output=UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample
lowerCAmelCase : Tuple = (image / 2 + 0.5).clamp(0 , 1 )
lowerCAmelCase : str = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowerCAmelCase : Any = self.numpy_to_pil(UpperCamelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCamelCase_ )
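# Usage sketch (hedged; this mirrors the diffusers DDIMPipeline API):
#   pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#   image = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images[0]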
"""simple docstring"""
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def _snake_case ( _snake_case : Any ):
if not is_accelerate_available():
return method
lowerCAmelCase : Any = version.parse(accelerate.__version__ ).base_version
if version.parse(_snake_case ) < version.parse('''0.17.0''' ):
return method
def wrapper(self : List[Any] , *_snake_case : Any , **_snake_case : Dict ):
if hasattr(self , '''_hf_hook''' ) and hasattr(self._hf_hook , '''pre_forward''' ):
self._hf_hook.pre_forward(self )
return method(self , *_snake_case , **_snake_case )
return wrapper
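# Usage sketch (hypothetical module): applied as a decorator, the wrapper above
# fires any accelerate pre-forward hook before the method touches offloaded
# weights on accelerate >= 0.17.0, and is a no-op on older versions.
#
#     class Block(torch.nn.Module):
#         @apply_forward_hook  # illustrative name for the decorator above
#         def encode(self, x):
#             ...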
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
snake_case__ : int = {'''configuration_plbart''': ['''PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PLBartConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : int = ['''PLBartTokenizer''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : int = [
'''PLBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PLBartForCausalLM''',
'''PLBartForConditionalGeneration''',
'''PLBartForSequenceClassification''',
'''PLBartModel''',
'''PLBartPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
snake_case__ : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
"""simple docstring"""
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
snake_case__ : List[Any] = logging.get_logger(__name__)
snake_case__ : Tuple = '''https://openaipublic.azureedge.net/jukebox/models/'''
snake_case__ : int = {
'''jukebox-1b-lyrics''': [
'''5b/vqvae.pth.tar''',
'''5b/prior_level_0.pth.tar''',
'''5b/prior_level_1.pth.tar''',
'''1b_lyrics/prior_level_2.pth.tar''',
],
'''jukebox-5b-lyrics''': [
'''5b/vqvae.pth.tar''',
'''5b/prior_level_0.pth.tar''',
'''5b/prior_level_1.pth.tar''',
'''5b_lyrics/prior_level_2.pth.tar''',
],
}
def _snake_case ( _snake_case : Union[str, Any] ):
if key.endswith('''.model.1.bias''' ) and len(key.split('''.''' ) ) > 10:
lowerCAmelCase : Optional[Any] = key.replace('''.model.1.bias''' , '''.conv1d_1.bias''' )
elif key.endswith('''.model.1.weight''' ) and len(key.split('''.''' ) ) > 10:
lowerCAmelCase : List[str] = key.replace('''.model.1.weight''' , '''.conv1d_1.weight''' )
elif key.endswith('''.model.3.bias''' ) and len(key.split('''.''' ) ) > 10:
lowerCAmelCase : Optional[Any] = key.replace('''.model.3.bias''' , '''.conv1d_2.bias''' )
elif key.endswith('''.model.3.weight''' ) and len(key.split('''.''' ) ) > 10:
lowerCAmelCase : Tuple = key.replace('''.model.3.weight''' , '''.conv1d_2.weight''' )
if "conditioner_blocks.0." in key:
lowerCAmelCase : Optional[Any] = key.replace('''conditioner_blocks.0''' , '''conditioner_blocks''' )
if "prime_prior" in key:
lowerCAmelCase : Union[str, Any] = key.replace('''prime_prior''' , '''encoder''' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
lowerCAmelCase : Union[str, Any] = key.replace('''.emb.''' , '''.''' )
if key.endswith('''k''' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('''.k''' , '''.codebook''' )
if "y_emb." in key:
return key.replace('''y_emb.''' , '''metadata_embedding.''' )
if "x_emb.emb." in key:
lowerCAmelCase : int = key.replace('''0.x_emb.emb''' , '''embed_tokens''' )
if "prime_state_ln" in key:
return key.replace('''prime_state_ln''' , '''encoder.final_layer_norm''' )
if ".ln" in key:
return key.replace('''.ln''' , '''.layer_norm''' )
if "_ln" in key:
return key.replace('''_ln''' , '''_layer_norm''' )
if "prime_state_proj" in key:
return key.replace('''prime_state_proj''' , '''encoder.proj_in''' )
if "prime_x_out" in key:
return key.replace('''prime_x_out''' , '''encoder.lm_head''' )
if "prior.x_out" in key:
return key.replace('''x_out''' , '''fc_proj_out''' )
if "x_emb" in key:
return key.replace('''x_emb''' , '''embed_tokens''' )
return key
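# A few concrete renames performed above, read straight from the branches:
#   '...model.1.bias'  -> '...conv1d_1.bias'
#   'y_emb.'           -> 'metadata_embedding.'
#   'prime_state_proj' -> 'encoder.proj_in'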
def _snake_case ( _snake_case : List[Any] , _snake_case : Optional[Any] , _snake_case : Tuple , _snake_case : List[Any] ):
lowerCAmelCase : List[Any] = {}
import re
lowerCAmelCase : List[str] = re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
lowerCAmelCase : List[str] = re.compile(
r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
lowerCAmelCase : Union[str, Any] = re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
lowerCAmelCase : Optional[int] = re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
lowerCAmelCase : Union[str, Any] = re.compile(
r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
lowerCAmelCase : Tuple = re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
lowerCAmelCase : Dict = re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)''' )
lowerCAmelCase : Optional[Any] = re.compile(
r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
lowerCAmelCase : Tuple = re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)''' )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(_snake_case ):
lowerCAmelCase : Optional[int] = re_encoder_block_conv_in.match(_snake_case )
lowerCAmelCase : Tuple = regex_match.groups()
lowerCAmelCase : Tuple = int(groups[2] ) * 2 + int(groups[3] )
lowerCAmelCase : Union[str, Any] = f'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}'''
lowerCAmelCase : Optional[int] = re_encoder_block_conv_in.sub(_snake_case , _snake_case )
elif re_encoder_block_resnet.fullmatch(_snake_case ):
lowerCAmelCase : Union[str, Any] = re_encoder_block_resnet.match(_snake_case )
lowerCAmelCase : Any = regex_match.groups()
lowerCAmelCase : Union[str, Any] = int(groups[2] ) * 2 + int(groups[3] )
lowerCAmelCase : Dict = {'''1''': 1, '''3''': 2}[groups[-2]]
lowerCAmelCase : Optional[int] = f'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.'''
lowerCAmelCase : Optional[int] = f'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
lowerCAmelCase : int = prefix + resnet_block
lowerCAmelCase : Dict = re_encoder_block_resnet.sub(_snake_case , _snake_case )
elif re_encoder_block_proj_out.fullmatch(_snake_case ):
lowerCAmelCase : List[str] = re_encoder_block_proj_out.match(_snake_case )
lowerCAmelCase : List[Any] = regex_match.groups()
lowerCAmelCase : str = f'''encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}'''
lowerCAmelCase : str = re_encoder_block_proj_out.sub(_snake_case , _snake_case )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(_snake_case ):
lowerCAmelCase : List[str] = re_decoder_block_conv_out.match(_snake_case )
lowerCAmelCase : int = regex_match.groups()
lowerCAmelCase : Union[str, Any] = int(groups[2] ) * 2 + int(groups[3] ) - 2
lowerCAmelCase : List[Any] = f'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}'''
lowerCAmelCase : Optional[Any] = re_decoder_block_conv_out.sub(_snake_case , _snake_case )
elif re_decoder_block_resnet.fullmatch(_snake_case ):
lowerCAmelCase : int = re_decoder_block_resnet.match(_snake_case )
lowerCAmelCase : Dict = regex_match.groups()
lowerCAmelCase : Optional[Any] = int(groups[2] ) * 2 + int(groups[3] ) - 2
lowerCAmelCase : List[str] = {'''1''': 1, '''3''': 2}[groups[-2]]
lowerCAmelCase : Any = f'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.'''
lowerCAmelCase : Dict = f'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
lowerCAmelCase : Optional[Any] = prefix + resnet_block
lowerCAmelCase : Any = re_decoder_block_resnet.sub(_snake_case , _snake_case )
elif re_decoder_block_proj_in.fullmatch(_snake_case ):
lowerCAmelCase : Tuple = re_decoder_block_proj_in.match(_snake_case )
lowerCAmelCase : int = regex_match.groups()
lowerCAmelCase : List[str] = f'''decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}'''
lowerCAmelCase : Tuple = re_decoder_block_proj_in.sub(_snake_case , _snake_case )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(_snake_case ):
lowerCAmelCase : Any = re_prior_cond_conv_out.match(_snake_case )
lowerCAmelCase : List[str] = regex_match.groups()
lowerCAmelCase : str = int(groups[1] ) * 2 + int(groups[2] ) - 2
lowerCAmelCase : Any = f'''conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}'''
lowerCAmelCase : int = re_prior_cond_conv_out.sub(_snake_case , _snake_case )
elif re_prior_cond_resnet.fullmatch(_snake_case ):
lowerCAmelCase : List[str] = re_prior_cond_resnet.match(_snake_case )
lowerCAmelCase : Any = regex_match.groups()
lowerCAmelCase : int = int(groups[1] ) * 2 + int(groups[2] ) - 2
lowerCAmelCase : Optional[int] = {'''1''': 1, '''3''': 2}[groups[-2]]
lowerCAmelCase : Tuple = f'''conditioner_blocks.upsampler.upsample_block.{block_index}.'''
lowerCAmelCase : str = f'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
lowerCAmelCase : int = prefix + resnet_block
lowerCAmelCase : Any = re_prior_cond_resnet.sub(_snake_case , _snake_case )
elif re_prior_cond_proj_in.fullmatch(_snake_case ):
lowerCAmelCase : int = re_prior_cond_proj_in.match(_snake_case )
lowerCAmelCase : int = regex_match.groups()
lowerCAmelCase : int = f'''conditioner_blocks.upsampler.proj_in.{groups[-1]}'''
lowerCAmelCase : Optional[int] = re_prior_cond_proj_in.sub(_snake_case , _snake_case )
# keep original key
else:
lowerCAmelCase : int = original_key
lowerCAmelCase : Dict = replace_key(_snake_case )
if f'''{key_prefix}.{key}''' not in model_state_dict or key is None:
print(f'''failed converting {original_key} to {key}, does not match''' )
# handle mismatched shapes
elif value.shape != model_state_dict[f'''{key_prefix}.{key}'''].shape:
lowerCAmelCase : Any = model_state_dict[f'''{key_prefix}.{key}''']
print(f'''{original_key} -> {key} :\nshape {val.shape} and {value.shape} do not match''' )
lowerCAmelCase : int = original_key
lowerCAmelCase : Tuple = original_key
lowerCAmelCase : List[str] = value
return new_dict
@torch.no_grad()
def _snake_case ( _snake_case : Tuple=None , _snake_case : Dict=None ):
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(f'''{pytorch_dump_folder_path}/{file.split("/" )[-1]}''' ):
lowerCAmelCase : Tuple = requests.get(f'''{PREFIX}{file}''' , allow_redirects=_snake_case )
os.makedirs(f'''{pytorch_dump_folder_path}/''' , exist_ok=_snake_case )
open(f'''{pytorch_dump_folder_path}/{file.split("/" )[-1]}''' , '''wb''' ).write(r.content )
lowerCAmelCase : Any = MODEL_MAPPING[model_name.split('''/''' )[-1]]
lowerCAmelCase : Tuple = JukeboxConfig.from_pretrained(_snake_case )
lowerCAmelCase : Optional[Any] = JukeboxModel(_snake_case )
lowerCAmelCase : Tuple = []
lowerCAmelCase : Optional[int] = {}
for i, dict_name in enumerate(_snake_case ):
lowerCAmelCase : Optional[Any] = torch.load(f'''{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}''' )['''model''']
lowerCAmelCase : List[Any] = {}
for k in old_dic.keys():
if k.endswith('''.b''' ):
lowerCAmelCase : Dict = old_dic[k]
elif k.endswith('''.w''' ):
lowerCAmelCase : Optional[int] = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
lowerCAmelCase : str = old_dic[k]
else:
lowerCAmelCase : int = old_dic[k]
lowerCAmelCase : Dict = '''vqvae''' if i == 0 else f'''priors.{3 - i}'''
lowerCAmelCase : List[Any] = fix_jukebox_keys(_snake_case , model.state_dict() , _snake_case , _snake_case )
weight_dict.append(_snake_case )
lowerCAmelCase : Dict = weight_dict.pop(0 )
model.vqvae.load_state_dict(_snake_case )
for i in range(len(_snake_case ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(_snake_case ).mkdir(exist_ok=_snake_case )
with open(f'''{pytorch_dump_folder_path}/mapping.json''' , '''w''' ) as txtfile:
json.dump(_snake_case , _snake_case )
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_snake_case )
return weight_dict
if __name__ == "__main__":
snake_case__ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''jukebox-5b-lyrics''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''jukebox-5b-lyrics-converted''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
snake_case__ : int = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
"""simple docstring"""
import os
import pytest
from transformers.dynamic_module_utils import get_imports
snake_case__ : Optional[Any] = '''
import os
'''
snake_case__ : Tuple = '''
def foo():
import os
return False
'''
snake_case__ : Any = '''
def foo():
def bar():
if True:
import os
return False
return bar()
'''
snake_case__ : Any = '''
import os
try:
import bar
except ImportError:
raise ValueError()
'''
snake_case__ : int = '''
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
'''
snake_case__ : Any = '''
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
'''
snake_case__ : List[str] = '''
import os
try:
import bar
except ImportError as e:
raise ValueError()
'''
snake_case__ : int = '''
import os
try:
import bar
except:
raise ValueError()
'''
snake_case__ : List[Any] = '''
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
'''
snake_case__ : Optional[int] = '''
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
'''
snake_case__ : Any = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize('''case''' , _snake_case )
def _snake_case ( _snake_case : Union[str, Any] , _snake_case : List[str] ):
lowerCAmelCase : Dict = os.path.join(_snake_case , '''test_file.py''' )
with open(_snake_case , '''w''' ) as _tmp_file:
_tmp_file.write(_snake_case )
lowerCAmelCase : Tuple = get_imports(_snake_case )
assert parsed_imports == ["os"]
"""simple docstring"""
def _snake_case ( _snake_case : int = 1000000 ):
lowerCAmelCase : Optional[int] = [i - 1 for i in range(limit + 1 )]
for i in range(2 , limit + 1 ):
if phi[i] == i - 1:
for j in range(2 * i , limit + 1 , _snake_case ):
phi[j] -= phi[j] // i
return sum(phi[2 : limit + 1] )
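# Self-contained mini version of the same sieve (limit = 8), as a sanity check:
_phi = [n - 1 for n in range(9)]
for _p in range(2, 9):
    if _phi[_p] == _p - 1:  # p is prime, so sieve its multiples
        for _m in range(2 * _p, 9, _p):
            _phi[_m] -= _phi[_m] // _p
assert sum(_phi[2:9]) == 21  # reduced proper fractions with denominator <= 8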
if __name__ == "__main__":
print(solution())
"""simple docstring"""
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class snake_case_( tf.keras.optimizers.schedules.LearningRateSchedule ):
def __init__( self : Tuple , UpperCamelCase_ : float , UpperCamelCase_ : Callable , UpperCamelCase_ : int , UpperCamelCase_ : float = 1.0 , UpperCamelCase_ : str = None , ):
super().__init__()
lowerCAmelCase : Dict = initial_learning_rate
lowerCAmelCase : List[str] = warmup_steps
lowerCAmelCase : Union[str, Any] = power
lowerCAmelCase : Dict = decay_schedule_fn
lowerCAmelCase : str = name
def __call__( self : Dict , UpperCamelCase_ : Optional[Any] ):
with tf.name_scope(self.name or '''WarmUp''' ) as name:
# Implements polynomial warmup. i.e., if global_step < warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
lowerCAmelCase : Dict = tf.cast(UpperCamelCase_ , tf.float32 )
lowerCAmelCase : List[Any] = tf.cast(self.warmup_steps , tf.float32 )
lowerCAmelCase : str = global_step_float / warmup_steps_float
lowerCAmelCase : Any = self.initial_learning_rate * tf.math.pow(UpperCamelCase_ , self.power )
return tf.cond(
global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=UpperCamelCase_ , )
def lowerCamelCase__ ( self : str ):
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
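# Worked numbers for the schedule above: with initial_learning_rate = 1e-3,
# warmup_steps = 100 and power = 1.0, step 50 yields 1e-3 * (50 / 100) = 5e-4;
# from step 100 onward the wrapped decay schedule takes over, evaluated at
# (step - warmup_steps).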
def _snake_case ( _snake_case : float , _snake_case : int , _snake_case : int , _snake_case : float = 0.0 , _snake_case : float = 0.9 , _snake_case : float = 0.999 , _snake_case : float = 1E-8 , _snake_case : Optional[float] = None , _snake_case : Optional[float] = None , _snake_case : float = 0.0 , _snake_case : float = 1.0 , _snake_case : Optional[List[str]] = None , ):
lowerCAmelCase : Dict = tf.keras.optimizers.schedules.PolynomialDecay(
initial_learning_rate=_snake_case , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=_snake_case , )
if num_warmup_steps:
lowerCAmelCase : List[str] = WarmUp(
initial_learning_rate=_snake_case , decay_schedule_fn=_snake_case , warmup_steps=_snake_case , )
if weight_decay_rate > 0.0:
lowerCAmelCase : Dict = AdamWeightDecay(
learning_rate=_snake_case , weight_decay_rate=_snake_case , beta_1=_snake_case , beta_2=_snake_case , epsilon=_snake_case , clipnorm=_snake_case , global_clipnorm=_snake_case , exclude_from_weight_decay=['''LayerNorm''', '''layer_norm''', '''bias'''] , include_in_weight_decay=_snake_case , )
else:
lowerCAmelCase : Any = tf.keras.optimizers.Adam(
learning_rate=_snake_case , beta_1=_snake_case , beta_2=_snake_case , epsilon=_snake_case , clipnorm=_snake_case , global_clipnorm=_snake_case , )
# We return the optimizer and the LR scheduler in order to better track the
# evolution of the LR independently of the optimizer.
return optimizer, lr_schedule
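# Usage sketch (hedged; this mirrors transformers' create_optimizer factory):
#   optimizer, lr_schedule = create_optimizer(
#       init_lr=5e-5,
#       num_train_steps=10_000,
#       num_warmup_steps=500,
#       weight_decay_rate=0.01,
#   )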
class snake_case_( a__ ):
def __init__( self : Optional[int] , UpperCamelCase_ : Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001 , UpperCamelCase_ : float = 0.9 , UpperCamelCase_ : float = 0.999 , UpperCamelCase_ : float = 1E-7 , UpperCamelCase_ : bool = False , UpperCamelCase_ : float = 0.0 , UpperCamelCase_ : Optional[List[str]] = None , UpperCamelCase_ : Optional[List[str]] = None , UpperCamelCase_ : str = "AdamWeightDecay" , **UpperCamelCase_ : List[Any] , ):
super().__init__(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ )
lowerCAmelCase : Tuple = weight_decay_rate
lowerCAmelCase : List[str] = include_in_weight_decay
lowerCAmelCase : Union[str, Any] = exclude_from_weight_decay
@classmethod
def lowerCamelCase__ ( cls : int , UpperCamelCase_ : Optional[Any] ):
lowerCAmelCase : Tuple = {'''WarmUp''': WarmUp}
return super(UpperCamelCase_ , cls ).from_config(UpperCamelCase_ , custom_objects=UpperCamelCase_ )
def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : List[str] , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple ):
super(UpperCamelCase_ , self )._prepare_local(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : Any = tf.constant(
self.weight_decay_rate , name='''adam_weight_decay_rate''' )
def lowerCamelCase__ ( self : int , UpperCamelCase_ : int , UpperCamelCase_ : Any , UpperCamelCase_ : List[str] ):
lowerCAmelCase : Any = self._do_use_weight_decay(var.name )
if do_decay:
return var.assign_sub(
learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['''weight_decay_rate'''] , use_locking=self._use_locking , )
return tf.no_op()
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Tuple=None , **UpperCamelCase_ : List[Any] ):
lowerCAmelCase, lowerCAmelCase : List[Any] = list(zip(*UpperCamelCase_ ) )
return super(UpperCamelCase_ , self ).apply_gradients(zip(UpperCamelCase_ , UpperCamelCase_ ) , name=UpperCamelCase_ , **UpperCamelCase_ )
def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[Any] ):
if apply_state is None:
return self._decayed_lr_t[var_dtype], {}
lowerCAmelCase : Dict = apply_state or {}
lowerCAmelCase : Dict = apply_state.get((var_device, var_dtype) )
if coefficients is None:
lowerCAmelCase : Optional[Any] = self._fallback_apply_state(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : str = coefficients
return coefficients["lr_t"], {"apply_state": apply_state}
def lowerCamelCase__ ( self : Tuple , UpperCamelCase_ : str , UpperCamelCase_ : List[Any] , UpperCamelCase_ : List[str]=None ):
lowerCAmelCase, lowerCAmelCase : Any = self._get_lr(var.device , var.dtype.base_dtype , UpperCamelCase_ )
lowerCAmelCase : List[str] = self._decay_weights_op(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
with tf.control_dependencies([decay] ):
return super(UpperCamelCase_ , self )._resource_apply_dense(UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ )
def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[Any]=None ):
lowerCAmelCase, lowerCAmelCase : Optional[Any] = self._get_lr(var.device , var.dtype.base_dtype , UpperCamelCase_ )
lowerCAmelCase : Tuple = self._decay_weights_op(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
with tf.control_dependencies([decay] ):
return super(UpperCamelCase_ , self )._resource_apply_sparse(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ )
def lowerCamelCase__ ( self : Optional[Any] ):
lowerCAmelCase : str = super().get_config()
config.update({'''weight_decay_rate''': self.weight_decay_rate} )
return config
def lowerCamelCase__ ( self : Tuple , UpperCamelCase_ : List[str] ):
if self.weight_decay_rate == 0:
return False
if self._include_in_weight_decay:
for r in self._include_in_weight_decay:
if re.search(UpperCamelCase_ , UpperCamelCase_ ) is not None:
return True
if self._exclude_from_weight_decay:
for r in self._exclude_from_weight_decay:
if re.search(UpperCamelCase_ , UpperCamelCase_ ) is not None:
return False
return True
class snake_case_( a__ ):
def __init__( self : Any ):
lowerCAmelCase : Any = []
lowerCAmelCase : List[str] = None
@property
def lowerCamelCase__ ( self : List[str] ):
if self._accum_steps is None:
lowerCAmelCase : Optional[Any] = tf.Variable(
tf.constant(0 , dtype=tf.int64 ) , trainable=UpperCamelCase_ , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
return self._accum_steps.value()
@property
def lowerCamelCase__ ( self : Any ):
if not self._gradients:
raise ValueError('''The accumulator should be called first to initialize the gradients''' )
return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
def __call__( self : Optional[Any] , UpperCamelCase_ : List[Any] ):
if not self._gradients:
lowerCAmelCase : Any = self.step # Create the step variable.
self._gradients.extend(
[
tf.Variable(
tf.zeros_like(UpperCamelCase_ ) , trainable=UpperCamelCase_ , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
if gradient is not None
else gradient
for gradient in gradients
] )
if len(UpperCamelCase_ ) != len(self._gradients ):
raise ValueError(F'''Expected {len(self._gradients )} gradients, but got {len(UpperCamelCase_ )}''' )
for accum_gradient, gradient in zip(self._gradients , UpperCamelCase_ ):
if accum_gradient is not None and gradient is not None:
accum_gradient.assign_add(UpperCamelCase_ )
self._accum_steps.assign_add(1 )
def lowerCamelCase__ ( self : Union[str, Any] ):
if not self._gradients:
return
self._accum_steps.assign(0 )
for gradient in self._gradients:
if gradient is not None:
gradient.assign(tf.zeros_like(UpperCamelCase_ ) )
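# Usage sketch (hypothetical training loop; the class above matches the shape
# of transformers' TF GradientAccumulator):
#   accumulator = GradientAccumulator()
#   for grads in micro_batches:          # accumulate once per micro-batch
#       accumulator(grads)
#   optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
#   accumulator.reset()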