| code (string, 81–54k chars) | code_codestyle (int64, 0–721) | style_context (string, 91–41.9k chars) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
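# Test suite for the `text-generation` pipeline: deterministic outputs on tiny PT/TF models,
# batching with pad tokens, long-input handling, and accelerate device/dtype placement.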
@is_pipeline_test
@require_torch_or_tf
class TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
    def test_small_model_pt(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="pt")
        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                        " oscope. FiliFili@@"
                    )
                }
            ],
        )

        outputs = text_generator(["This is a test", "This is a second test"])
        self.assertEqual(
            outputs,
            [
                [
                    {
                        "generated_text": (
                            "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                            " oscope. FiliFili@@"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"
                            " oscope. oscope. FiliFili@@"
                        )
                    }
                ],
            ],
        )

        outputs = text_generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(list)},
                {"generated_token_ids": ANY(list)},
            ],
        )

        # Batching requires a pad token; tiny-ctrl has none, so reuse the EOS token id.
        text_generator.tokenizer.pad_token_id = text_generator.model.config.eos_token_id
        text_generator.tokenizer.pad_token = "<pad>"
        outputs = text_generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(list)},
                    {"generated_token_ids": ANY(list)},
                ],
                [
                    {"generated_token_ids": ANY(list)},
                    {"generated_token_ids": ANY(list)},
                ],
            ],
        )
@require_tf
    def test_small_model_tf(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="tf")

        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                        " please,"
                    )
                }
            ],
        )

        outputs = text_generator(["This is a test", "This is a second test"], do_sample=False)
        self.assertEqual(
            outputs,
            [
                [
                    {
                        "generated_text": (
                            "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                            " please,"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"
                            " Cannes 閲閲Cannes Cannes Cannes 攵 please,"
                        )
                    }
                ],
            ],
        )
    def get_test_pipeline(self, model, tokenizer, processor):
        text_generator = TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return text_generator, ["This is a test", "Another test"]
    def test_stop_sequence_stopping_criteria(self):
        prompt = "Hello I believe in"
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        output = text_generator(prompt)
        self.assertEqual(
            output,
            [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}],
        )

        output = text_generator(prompt, stop_sequence=" fe")
        self.assertEqual(output, [{"generated_text": "Hello I believe in fe"}])
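    # `run_pipeline_test` below is driven by the common pipeline test mixin, which calls
    # `get_test_pipeline` for every architecture in the model mappings above.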
    def run_pipeline_test(self, text_generator, _):
        model = text_generator.model
        tokenizer = text_generator.tokenizer

        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))

        outputs = text_generator("This is a test", return_full_text=False)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])

        text_generator = pipeline(task="text-generation", model=model, tokenizer=tokenizer, return_full_text=False)
        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])

        outputs = text_generator("This is a test", return_full_text=True)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))

        outputs = text_generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        if text_generator.tokenizer.pad_token is not None:
            outputs = text_generator(
                ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
            )
            self.assertEqual(
                outputs,
                [
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                ],
            )

        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_text=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_tensors=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_text=True, return_tensors=True)

        # Empty prompt is slightly special: it requires a BOS token to exist.
        # Special case for Pegasus, which always appends EOS, so it works even without BOS.
        if (
            text_generator.tokenizer.bos_token_id is not None
            or "Pegasus" in tokenizer.__class__.__name__
            or "Git" in model.__class__.__name__
        ):
            outputs = text_generator("")
            self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        else:
            with self.assertRaises((ValueError, AssertionError)):
                outputs = text_generator("")

        if text_generator.framework == "tf":
            # TF generation does not support max_new_tokens, and it's impossible
            # to control long generation with only max_length without
            # fancy calculation, dismissing tests for now.
            return

        # We don't care about infinite-range models; they already work.
        # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
        EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = ["RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM"]
        if (
            tokenizer.model_max_length < 10000
            and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
        ):
            # Handling of large generations
            with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError)):
                text_generator("This is a test" * 500, max_new_tokens=20)

            outputs = text_generator("This is a test" * 500, handle_long_generation="hole", max_new_tokens=20)
            # Hole strategy cannot work when the prompt plus new tokens exceed the model's maximum length.
            with self.assertRaises(ValueError):
                text_generator(
                    "This is a test" * 500,
                    handle_long_generation="hole",
                    max_new_tokens=tokenizer.model_max_length + 10,
                )
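    # The three tests below exercise device placement: `device_map`/`torch_dtype` can be passed
    # through `model_kwargs` or, since they are unambiguous, directly as pipeline arguments.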
@require_torch
@require_accelerate
@require_torch_gpu
    def test_small_model_pt_bloom_accelerate(self):
        import torch

        # Classic `model_kwargs`
        pipe = pipeline(
            model="hf-internal-testing/tiny-random-bloom",
            model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloat16},
        )
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

        # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.bfloat16)
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

        # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto")
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.float32)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )
@require_torch
@require_torch_gpu
    def test_small_model_fp16(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device=0, torch_dtype=torch.float16)
        pipe("This is a test")
@require_torch
@require_accelerate
@require_torch_gpu
    def test_pipeline_accelerate_top_p(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.float16)
        pipe("This is a test", do_sample=True, top_p=0.5)
    def test_pipeline_length_setting_warning(self):
        prompt = "Hello world"
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        if text_generator.model.framework == "tf":
            logger = logging.get_logger("transformers.generation.tf_utils")
        else:
            logger = logging.get_logger("transformers.generation.utils")
        logger_msg = "Both `max_new_tokens`"  # The beginning of the message to be checked in this test

        # Both are set by the user -> log warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10, max_new_tokens=1)
        self.assertIn(logger_msg, cl.out)

        # The user only sets one -> no warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_new_tokens=1)
        self.assertNotIn(logger_msg, cl.out)

        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10)
        self.assertNotIn(logger_msg, cl.out)
| code_codestyle: 651 | style_context: (verbatim duplicate of the code above) | style_context_codestyle: 651 | label: 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
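# Determinism is required because the tests below compare generated image slices
# against hard-coded expected values.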
class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            num_train_timesteps=1000,
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
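    # Inputs are built per-device: MPS does not support device-local generators,
    # so a global `torch.manual_seed` is used there instead.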
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "An astronaut riding an elephant",
            "source_prompt": "An astronaut riding a horse",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "eta": 0.1,
            "strength": 0.8,
            "guidance_scale": 3,
            "source_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_cycle(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_cycle_fp16(self):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module, "half"):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @unittest.skip("non-deterministic pipeline")
    def test_inference_batch_single_identical(self):
        return super().test_inference_batch_single_identical()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # Clean up GPU memory after each test.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_cycle_diffusion_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision="fp16"
        )

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)

        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5e-1
    def test_cycle_diffusion_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)

        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        assert np.abs(image - expected_image).max() < 2e-2
| code_codestyle: 651 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = " \"\"\"\n Output class for the scheduler's step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n"
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self):
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)
    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
| style_context_codestyle: 651 | label: 1 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    language: str = field(
        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."}
    )
    train_language: Optional[str] = field(
        default=None, metadata={"help": "Train language if it is different from the evaluation language."}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    do_lower_case: Optional[bool] = field(
        default=False,
        metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_xnli", model_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    # Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                "xnli", model_args.language, split="train", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None
            )
        else:
            train_dataset = load_dataset(
                "xnli", model_args.train_language, split="train", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None
            )
        label_list = train_dataset.features["label"].names

    if training_args.do_eval:
        eval_dataset = load_dataset(
            "xnli", model_args.language, split="validation", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None
        )
        label_list = eval_dataset.features["label"].names

    if training_args.do_predict:
        predict_dataset = load_dataset(
            "xnli", model_args.language, split="test", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None
        )
        label_list = predict_dataset.features["label"].names

    # Labels
    num_labels = len(label_list)
    # Load pretrained model and tokenizer
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label={str(i): label for i, label in enumerate(label_list)},
        label2id={label: i for i, label in enumerate(label_list)},
        finetuning_task="xnli",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        do_lower_case=model_args.do_lower_case,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )

    # Preprocessing the datasets
    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    def preprocess_function(examples):
        # Tokenize the texts
        return tokenizer(
            examples["premise"], examples["hypothesis"], padding=padding, max_length=data_args.max_seq_length, truncation=True
        )
    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on train dataset"
            )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on validation dataset"
            )

    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc="prediction dataset map pre-processing"):
            predict_dataset = predict_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on prediction dataset"
            )
    # Get the metric function
    metric = evaluate.load("xnli")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return metric.compute(predictions=preds, references=p.label_ids)

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)

        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Prediction
    if training_args.do_predict:
        logger.info("*** Predict ***")
        predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix="predict")

        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
        )
        metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))

        trainer.log_metrics("predict", metrics)
        trainer.save_metrics("predict", metrics)

        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")


if __name__ == "__main__":
    main()
| code_codestyle: 651 |
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class OptimizationFTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)
    def testGradientAccumulator(self):
        accumulator = GradientAccumulator()
        accumulator([tf.constant([1.0, 2.0])])
        accumulator([tf.constant([-2.0, 1.0])])
        accumulator([tf.constant([-1.0, 2.0])])
        with self.assertRaises(ValueError):
            accumulator([tf.constant([1.0, 1.0]), tf.constant([2.0, 2.0])])
        self.assertEqual(accumulator.step, 3)
        self.assertEqual(len(accumulator.gradients), 1)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [-2.0, 5.0], tol=1e-2)
        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [0.0, 0.0], tol=1e-2)
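    # The next test checks that gradients accumulated on two replicas of a
    # MirroredStrategy stay per-replica until `apply_gradients` merges them.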
    def testGradientAccumulatorDistributionStrategy(self):
        context._context = None
        ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices("CPU")
        if len(physical_devices) == 1:
            tf.config.set_logical_device_configuration(
                physical_devices[0], [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()]
            )
        devices = tf.config.list_logical_devices(device_type="CPU")
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2])

        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0])
            optimizer, _ = create_optimizer(5e-5, 10, 5)
            gradient_placeholder = tf.Variable([0.0, 0.0], trainable=False)

        def accumulate_on_replica(gradient):
            accumulator([gradient])

        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients, [variable])))

        @tf.function
        def accumulate(grad1, grad2):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder)
                local_variables[0].assign(grad1)
                local_variables[1].assign(grad2)
                strategy.run(accumulate_on_replica, args=(gradient_placeholder,))

        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(apply_on_replica)

        def _check_local_values(grad1, grad2):
            values = strategy.experimental_local_results(accumulator._gradients[0])
            self.assertListAlmostEqual(values[0].value(), grad1, tol=1e-2)
            self.assertListAlmostEqual(values[1].value(), grad2, tol=1e-2)

        accumulate([1.0, 2.0], [-1.0, 1.0])
        accumulate([3.0, -1.0], [-1.0, -1.0])
        accumulate([-2.0, 2.0], [3.0, -2.0])
        self.assertEqual(accumulator.step, 3)
        _check_local_values([2.0, 3.0], [1.0, -2.0])
        apply_grad()
        self.assertListAlmostEqual(variable.value(), [4.0, 3.0], tol=1e-2)
        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        _check_local_values([0.0, 0.0], [0.0, 0.0])
| style_context_codestyle: 651 | label: 1 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
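# Usage sketch (hypothetical column names): attaching the template lets
# `Dataset.prepare_for_task` rename arbitrary columns to the canonical text/summary pair.
#     template = Summarization(text_column="article", summary_column="highlights")
#     dataset = dataset.prepare_for_task(template)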
| code_codestyle: 651 |
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)
def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith("<PIL.Image.Image image mode=RGB size=100x100 at")


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    res = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, res).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]

    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)

    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
| style_context_codestyle: 651 | label: 1 |
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = """\
@InProceedings{moosavi2019minimum,
 author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
 title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
 year = {2019},
 booktitle = {Proceedings of the 57th Annual Meeting of
 the Association for Computational Linguistics (Volume 1: Long Papers)},
 publisher = {Association for Computational Linguistics},
 address = {Florence, Italy},
}

@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}

@INPROCEEDINGS{Bagga98algorithmsfor,
 author = {Amit Bagga and Breck Baldwin},
 title = {Algorithms for Scoring Coreference Chains},
 booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
 year = {1998},
 pages = {563--566}
}

@INPROCEEDINGS{Luo05oncoreference,
 author = {Xiaoqiang Luo},
 title = {On coreference resolution performance metrics},
 booktitle = {In Proc. of HLT/EMNLP},
 year = {2005},
 pages = {25--32},
 publisher = {URL}
}

@inproceedings{moosavi-strube-2016-coreference,
 title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",
 author = "Moosavi, Nafise Sadat and
 Strube, Michael",
 booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
 month = aug,
 year = "2016",
 address = "Berlin, Germany",
 publisher = "Association for Computational Linguistics",
 url = "https://www.aclweb.org/anthology/P16-1060",
 doi = "10.18653/v1/P16-1060",
 pages = "632--642",
}
"""

_DESCRIPTION = """\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].

This wrapper of CoVal currently only works with the CoNLL line format:
the CoNLL format has one word per line with all the annotation for this word in columns separated by spaces:
Column  Type    Description
1   Document ID     This is a variation on the document filename
2   Part number     Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3   Word number
4   Word itself     This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contains the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5   Part-of-Speech
6   Parse bit   This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterisk with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.
7   Predicate lemma     The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"
8   Predicate Frameset ID   This is the PropBank frameset ID of the predicate in Column 7.
9   Word sense  This is the word sense of the word in Column 3.
10  Speaker/Author  This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11  Named Entities  These columns identify the spans representing various named entities.
12:N    Predicate Arguments     There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N   Coreference     Coreference chain information encoded in a parenthesis structure.
More information on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html

Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md

CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
"""

_KWARGS_DESCRIPTION = """
Calculates coreference evaluation metrics.
Args:
    predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
        Each prediction is a word with its annotations as a string made of columns joined with spaces.
        Only columns 4, 5, 6 and the last column are used (word, POS, parse bit and coreference annotation).
        See the details on the format in the description of the metric.
    references: list of sentences. Each sentence is a list of word references to score in the CoNLL format.
        Each reference is a word with its annotations as a string made of columns joined with spaces.
        Only columns 4, 5, 6 and the last column are used (word, POS, parse bit and coreference annotation).
        See the details on the format in the description of the metric.
    keep_singletons: After extracting all mentions of key or system files,
        mentions whose corresponding coreference chain is of size one are considered as singletons.
        The default evaluation mode will include singletons in evaluations if they are included
        in the key or the system files. By setting 'keep_singletons=False', all singletons in the
        key and system files will be excluded from the evaluation.
    NP_only: Most of the recent coreference resolvers only resolve NP mentions and leave out the
        resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.
    min_span: By setting 'min_span', the scorer reports the results based on automatically detected
        minimum spans. Minimum spans are determined using the MINA algorithm.

Returns:
    'mentions': mentions
    'muc': MUC metric [Vilain et al, 1995]
    'bcub': B-cubed [Bagga and Baldwin, 1998]
    'ceafe': CEAFe [Luo et al., 2005]
    'lea': LEA [Moosavi and Strube, 2016]
    'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)

Examples:

    >>> coval = datasets.load_metric('coval')
    >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',
    ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',
    ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',
    ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',
    ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',
    ... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']
    >>> references = [words]
    >>> predictions = [words]
    >>> results = coval.compute(predictions=predictions, references=references)
    >>> print(results)  # doctest:+ELLIPSIS
    {'mentions/recall': 1.0,[...] 'conll_score': 100.0}
"""
def get_coref_infos(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
):
    """Extract mention clusters and mention-cluster assignments for one document."""
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}

    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}"
        )
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}"
        )

    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively"
        )

    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    """Score one document with every requested metric and aggregate the CoNLL average."""
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})

        logger.info(
            name.ljust(10),
            f"Recall: {recall * 100:.2f}",
            f" Precision: {precision * 100:.2f}",
            f" F1: {f1 * 100:.2f}",
        )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})

    return output_scores
def check_gold_parse_annotation(key_lines):
    """Return True if the key lines carry gold parse information in column 6."""
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Coval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Sequence(datasets.Value("string")),
                }
            ),
            codebase_urls=["https://github.com/ns-moosavi/coval"],
            reference_urls=[
                "https://github.com/ns-moosavi/coval",
                "https://www.aclweb.org/anthology/P16-1060",
                "http://www.conll.cemantix.org/2012/data.html",
            ],
        )

    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        metrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=metrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )

        return score
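
# A minimal arithmetic illustration of the averaged CoNLL score computed in
# `evaluate` above: it is simply the mean of the MUC, B-cubed and CEAFe F1
# values, rescaled to 0-100. The F1 values below are made up for the example.
muc_f1, bcub_f1, ceafe_f1 = 0.70, 0.65, 0.60
conll_score = (muc_f1 + bcub_f1 + ceafe_f1) / 3 * 100
print(f"{conll_score:.2f}")  # 65.00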
| 651
|
class Graph:
    """Undirected weighted graph stored as an adjacency dictionary."""

    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        """Add a vertex if it is not already present."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        """Add an undirected edge between head and tail with the given weight."""
        self.add_vertex(head)
        self.add_vertex(tail)
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """Make all edge weights distinct, which Boruvka's algorithm assumes."""
        edges = self.get_edges()
        # Each undirected edge appears twice; drop the reversed duplicates.
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])
        edges.sort(key=lambda e: e[2])
        # Bump any tied weight above its predecessor so the ordering is strict.
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        """Return all (tail, head, weight) triples (each undirected edge twice)."""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        """Build a graph from lists of vertices and (head, tail, weight) edges."""
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g

    class UnionFind:
        """Disjoint-set forest with union by rank and path compression."""

        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)
            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)
            if item != self.parent[item]:
                # Path compression: point directly at the root.
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            root1 = self.find(item1)
            root2 = self.find(item2)
            if root1 == root2:
                return root1
            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1
            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2
            # Equal ranks: pick one root and bump its rank.
            self.rank[root1] += 1
            self.parent[root2] = root1
            return root1

    @staticmethod
    def boruvka_mst(graph):
        """Compute a minimum spanning tree with Boruvka's algorithm."""
        num_components = graph.num_vertices
        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1

            edges = graph.get_edges()
            # Each undirected edge appears twice; drop the reversed duplicates.
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            # Record the cheapest edge leaving each component.
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]
                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            # Add the selected edges and contract the components they join.
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
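
# Usage sketch for the repaired classes above: build a small weighted graph,
# make the weights distinct (Boruvka's algorithm assumes unique weights), then
# contract components until a single tree remains.
g = Graph.build(vertices=[0, 1, 2, 3], edges=[(0, 1, 1), (0, 2, 2), (2, 3, 3), (1, 3, 4)])
g.distinct_weight()
mst = Graph.boruvka_mst(g)
print(mst)  # the three cheapest edges (weights 1, 2 and 3) connect all four vertices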
| 651
| 1
|
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    """Count the parameters of the model that require gradients."""
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """Save the top checkpoints by the given validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2, bleu and em, got {metric}. You can make your own by adding to"
            " this function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    """Stop training once the validation metric stops improving."""
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True
    ) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
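
# Wiring sketch (hedged): how these helpers are typically attached to a
# pytorch_lightning trainer in the finetuning scripts; the argument values
# below are illustrative only.
callbacks = [
    Seq2SeqLoggingCallback(),
    get_checkpoint_callback(output_dir="outputs", metric="rouge2"),
    get_early_stopping_callback(metric="rouge2", patience=3),
]
trainer = pl.Trainer(callbacks=callbacks)  # plus the usual hardware/epoch flags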
| 651
|
def solution():
    """Return the last ten digits of the series 1**1 + 2**2 + ... + 1000**1000."""
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]
if __name__ == "__main__":
print(solution())
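
# An equivalent, cheaper sketch: the last ten digits can be computed without
# materializing the ~3000-digit sum by reducing modulo 10**10 and using
# Python's three-argument pow for modular exponentiation.
MOD = 10**10
print(str(sum(pow(i, i, MOD) for i in range(1, 1001)) % MOD).zfill(10))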
| 651
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
class A( UpperCamelCase , UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = '''maskformer-swin'''
UpperCamelCase = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self : int , A_ : Optional[Any]=224 , A_ : Dict=4 , A_ : str=3 , A_ : List[str]=96 , A_ : Union[str, Any]=[2, 2, 6, 2] , A_ : int=[3, 6, 12, 24] , A_ : Tuple=7 , A_ : Optional[Any]=4.0 , A_ : List[Any]=True , A_ : str=0.0 , A_ : str=0.0 , A_ : Dict=0.1 , A_ : Union[str, Any]="gelu" , A_ : Dict=False , A_ : List[str]=0.02 , A_ : List[str]=1E-5 , A_ : Union[str, Any]=None , A_ : Union[str, Any]=None , **A_ : List[Any] , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**A_ )
lowerCamelCase_ = image_size
lowerCamelCase_ = patch_size
lowerCamelCase_ = num_channels
lowerCamelCase_ = embed_dim
lowerCamelCase_ = depths
lowerCamelCase_ = len(A_ )
lowerCamelCase_ = num_heads
lowerCamelCase_ = window_size
lowerCamelCase_ = mlp_ratio
lowerCamelCase_ = qkv_bias
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = drop_path_rate
lowerCamelCase_ = hidden_act
lowerCamelCase_ = use_absolute_embeddings
lowerCamelCase_ = layer_norm_eps
lowerCamelCase_ = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowerCamelCase_ = int(embed_dim * 2 ** (len(A_ ) - 1) )
lowerCamelCase_ = ['stem'] + [f"""stage{idx}""" for idx in range(1 , len(A_ ) + 1 )]
lowerCamelCase_ , lowerCamelCase_ = get_aligned_output_features_output_indices(
out_features=A_ , out_indices=A_ , stage_names=self.stage_names )
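
# Hedged example: the class above is obfuscated to `A`; in transformers the
# corresponding public class is `MaskFormerSwinConfig`, used here instead.
from transformers import MaskFormerSwinConfig

config = MaskFormerSwinConfig()  # defaults mirror the signature above
print(config.hidden_size)   # 96 * 2 ** (4 - 1) == 768, the channel dim after the last stage
print(config.stage_names)   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']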
| 651
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase : Dict = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Optional[int] = ["ViTFeatureExtractor"]
lowerCamelCase : Dict = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : int = [
"VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTForImageClassification",
"ViTForMaskedImageModeling",
"ViTModel",
"ViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Tuple = [
"TFViTForImageClassification",
"TFViTModel",
"TFViTPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Dict = [
"FlaxViTForImageClassification",
"FlaxViTModel",
"FlaxViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
lowerCamelCase : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
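
# Illustration of the lazy-import pattern implemented above: importing the
# package only registers names in `_import_structure`; the heavy submodule is
# imported the first time an attribute is accessed.
import transformers.models.vit as vit  # cheap: no torch/TF/flax import yet

model_cls = vit.ViTModel  # first attribute access triggers the real import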
| 651
| 1
|
import argparse
import os
import re
lowerCamelCase : List[str] = "src/diffusers"
# Pattern that looks at the indentation in a line.
lowerCamelCase : Dict = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
lowerCamelCase : Dict = re.compile(r"^\s*\"([^\"]+)\":")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
lowerCamelCase : str = re.compile(r"^\s*_import_structure\[\"([^\"]+)\"\]")
# Pattern that matches `"key",` and puts `key` in group 0.
lowerCamelCase : str = re.compile(r"^\s*\"([^\"]+)\",\s*$")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
lowerCamelCase : Any = re.compile(r"\[([^\]]+)\]")
def _SCREAMING_SNAKE_CASE ( lowercase : Optional[Any] ):
'''simple docstring'''
lowerCamelCase_ = _re_indent.search(lowercase )
return "" if search is None else search.groups()[0]
def _SCREAMING_SNAKE_CASE ( lowercase : str , lowercase : Tuple="" , lowercase : List[str]=None , lowercase : Any=None ):
'''simple docstring'''
lowerCamelCase_ = 0
lowerCamelCase_ = code.split('\n' )
if start_prompt is not None:
while not lines[index].startswith(lowercase ):
index += 1
lowerCamelCase_ = ['\n'.join(lines[:index] )]
else:
lowerCamelCase_ = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
lowerCamelCase_ = [lines[index]]
index += 1
while index < len(lowercase ) and (end_prompt is None or not lines[index].startswith(lowercase )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(lowercase ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ):
current_block.append(lines[index] )
blocks.append('\n'.join(lowercase ) )
if index < len(lowercase ) - 1:
lowerCamelCase_ = [lines[index + 1]]
index += 1
else:
lowerCamelCase_ = []
else:
blocks.append('\n'.join(lowercase ) )
lowerCamelCase_ = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(lowercase ) > 0:
blocks.append('\n'.join(lowercase ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(lowercase ):
blocks.append('\n'.join(lines[index:] ) )
return blocks
def ignore_underscore(key):
    """Wrap a key function so that sorting ignores case and underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    """Sort a list of imported objects: constants first, then classes, then functions."""

    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
def sort_objects_in_import(import_statement):
    """Sort the objects listed in a single `_import_structure` statement."""

    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
def sort_imports(file, check_only=True):
    """Sort the `_import_structure` entries of one init file; return True if changes are needed."""
    with open(file, "r") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    """Run `sort_imports` on every __init__.py under the diffusers source tree."""
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures.append(os.path.join(root, "__init__.py"))
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
lowerCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
lowerCamelCase : Tuple = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
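
# Example of the single-line case handled by `sort_objects_in_import` above
# (the statement content is illustrative): constants sort first, then classes,
# then lowercase names; case and underscores are ignored within each bucket.
stmt = '_import_structure["models.vit"] = ["ViTModel", "VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "vit_helper"]'
print(sort_objects_in_import(stmt))
# -> _import_structure["models.vit"] = ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTModel", "vit_helper"]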
| 651
|
from functools import lru_cache
def unique_prime_factors(n: int) -> set:
    """Find the distinct prime factors of a positive integer by trial division."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoized count of unique prime factors."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """Check whether all elements of a list are equal."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Find the first n consecutive integers that each have n distinct prime factors."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]
        # Run elements through our unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)
        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group
        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int:
    """Return the first member of the first run of n consecutive integers with n distinct prime factors."""
    results = run(n)
    return results[0] if len(results) else None
if __name__ == "__main__":
print(solution())
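
# Quick sanity check for the helpers above: 644 = 2^2 * 7 * 23, 645 = 3 * 5 * 43
# and 646 = 2 * 17 * 19 each have three distinct prime factors, and they form
# the first such run of three consecutive integers.
assert upf_len(644) == upf_len(645) == upf_len(646) == 3
print(solution(3))  # 644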
| 651
|
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=UpperCamelCase )
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = field(default='''language-modeling''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
UpperCamelCase = Features({'''text''': Value('''string''' )} )
UpperCamelCase = Features({} )
UpperCamelCase = "text"
@property
def a__ ( self : List[Any] ) -> Dict[str, str]:
"""simple docstring"""
return {self.text_column: "text"}
| 651
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase : Optional[Any] = {
"configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Optional[int] = [
"MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MegatronBertForCausalLM",
"MegatronBertForMaskedLM",
"MegatronBertForMultipleChoice",
"MegatronBertForNextSentencePrediction",
"MegatronBertForPreTraining",
"MegatronBertForQuestionAnswering",
"MegatronBertForSequenceClassification",
"MegatronBertForTokenClassification",
"MegatronBertModel",
"MegatronBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
lowerCamelCase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 651
|
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = '''new-model'''
if is_tf_available():
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = NewModelConfig
@require_tf
class A( unittest.TestCase ):
'''simple docstring'''
@slow
def a__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = 'bert-base-cased'
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = 'bert-base-cased'
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForPreTraining.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForCausalLM.from_pretrained(A_ )
lowerCamelCase_ , lowerCamelCase_ = TFAutoModelForCausalLM.from_pretrained(A_ , output_loading_info=A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelWithLMHead.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : int ) -> str:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForMaskedLM.from_pretrained(A_ )
lowerCamelCase_ , lowerCamelCase_ = TFAutoModelForMaskedLM.from_pretrained(A_ , output_loading_info=A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : Any ) -> List[Any]:
"""simple docstring"""
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained(A_ )
lowerCamelCase_ , lowerCamelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained(A_ , output_loading_info=A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForSequenceClassification.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : List[Any] ) -> Any:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForQuestionAnswering.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
@require_tensorflow_probability
def a__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForTableQuestionAnswering.from_pretrained(A_ )
lowerCamelCase_ , lowerCamelCase_ = TFAutoModelForTableQuestionAnswering.from_pretrained(
A_ , output_loading_info=A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
def a__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = TFAutoModelWithLMHead.from_pretrained(A_ )
self.assertIsInstance(A_ , A_ )
self.assertEqual(model.num_parameters() , 14410 )
self.assertEqual(model.num_parameters(only_trainable=A_ ) , 14410 )
def a__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = TFAutoModelWithLMHead.from_pretrained(A_ )
self.assertIsInstance(A_ , A_ )
self.assertEqual(model.num_parameters() , 14410 )
self.assertEqual(model.num_parameters(only_trainable=A_ ) , 14410 )
def a__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ = TFAutoModel.from_pretrained('sgugger/funnel-random-tiny' )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = copy.deepcopy(model.config )
lowerCamelCase_ = ['FunnelBaseModel']
lowerCamelCase_ = TFAutoModel.from_config(A_ )
self.assertIsInstance(A_ , A_ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(A_ )
lowerCamelCase_ = TFAutoModel.from_pretrained(A_ )
self.assertIsInstance(A_ , A_ )
def a__ ( self : Any ) -> Tuple:
"""simple docstring"""
try:
AutoConfig.register('new-model' , A_ )
lowerCamelCase_ = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(A_ ):
auto_class.register(A_ , A_ )
auto_class.register(A_ , A_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(A_ ):
auto_class.register(A_ , A_ )
# Now that the config is registered, it can be used as any other config with the auto-API
lowerCamelCase_ = BertModelTester(self ).get_config()
lowerCamelCase_ = NewModelConfig(**tiny_config.to_dict() )
lowerCamelCase_ = auto_class.from_config(A_ )
self.assertIsInstance(A_ , A_ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(A_ )
lowerCamelCase_ = auto_class.from_pretrained(A_ )
self.assertIsInstance(A_ , A_ )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def a__ ( self : int ) -> int:
"""simple docstring"""
with self.assertRaisesRegex(
A_ , 'bert-base is not a local folder and is not a valid model identifier' ):
lowerCamelCase_ = TFAutoModel.from_pretrained('bert-base' )
def a__ ( self : Any ) -> Dict:
"""simple docstring"""
with self.assertRaisesRegex(
A_ , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
lowerCamelCase_ = TFAutoModel.from_pretrained(A_ , revision='aaaaaa' )
def a__ ( self : str ) -> int:
"""simple docstring"""
with self.assertRaisesRegex(
A_ , 'hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin' , ):
lowerCamelCase_ = TFAutoModel.from_pretrained('hf-internal-testing/config-no-model' )
def a__ ( self : Any ) -> List[Any]:
"""simple docstring"""
with self.assertRaisesRegex(A_ , 'Use `from_pt=True` to load this model' ):
lowerCamelCase_ = TFAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only' )
def a__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' )
with RequestCounter() as counter:
lowerCamelCase_ = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
lowerCamelCase_ = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' )
with RequestCounter() as counter:
lowerCamelCase_ = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
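
# Hedged sketch of the registration pattern the last test exercises, via the
# public auto-API. `NewModelConfig` comes from the test file above; `TFNewModel`
# stands in for the obfuscated tiny model class and is an assumed name.
from transformers import AutoConfig, TFAutoModel

AutoConfig.register("new-model", NewModelConfig)
TFAutoModel.register(NewModelConfig, TFNewModel)
model = TFAutoModel.from_config(NewModelConfig())  # now resolvable like a built-in model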
| 651
| 1
|
import qiskit
def quantum_entanglement(qubits: int = 2):
    """Build a GHZ state on `qubits` qubits, measure it, and return the counts."""
    classical_bits = qubits
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)
    for i in range(1, qubits):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1, i)
    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(classical_bits)))
    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.
    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(F"""Total count for various states are: {quantum_entanglement(3)}""")
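
# Expected behaviour (illustrative): measuring the GHZ state only ever yields
# the two fully correlated bitstrings, with counts split roughly evenly across
# the 1000 shots, e.g. {'000': 492, '111': 508} for three qubits.
counts = quantum_entanglement(3)
assert set(counts) <= {"000", "111"}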
| 651
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : str = logging.get_logger(__name__)
lowerCamelCase : List[str] = {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = '''gpt_neox_japanese'''
    def __init__( self , vocab_size=32000 , hidden_size=2560 , num_hidden_layers=32 , num_attention_heads=32 , intermediate_multiple_size=4 , hidden_act="gelu" , rotary_pct=1.00 , rotary_emb_base=10000 , max_position_embeddings=2048 , initializer_range=0.02 , layer_norm_eps=1E-5 , use_cache=True , bos_token_id=31996 , eos_token_id=31999 , attention_dropout=0.1 , hidden_dropout=0.0 , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
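
# Usage sketch (illustrative, not part of the original file): fields can be overridden
# by keyword, mirroring other transformers config classes.
#     config = GPTNeoXJapaneseConfig(hidden_size=1024, num_hidden_layers=16)
#     assert config.hidden_size == 1024
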
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class A( UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = BioGptTokenizer
UpperCamelCase = False
def a__ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' ) as fp:
            fp.write(json.dumps(vocab_tokens ) )
        with open(self.merges_file , 'w' ) as fp:
            fp.write('\n'.join(merges ) )
def a__ ( self : Optional[int] , A_ : Dict ) -> Any:
"""simple docstring"""
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text
def a__ ( self : int ) -> str:
"""simple docstring"""
        tokenizer = BioGptTokenizer(self.vocab_file , self.merges_file )
        text = 'lower'
        bpe_tokens = ['low', 'er</w>']
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + ['<unk>']
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
@slow
def a__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
        tokenizer = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
        text = tokenizer.encode('sequence builders' , add_special_tokens=False )
        text_a = tokenizer.encode('multi-sequence build' , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
"text-classification",
"language-modeling",
"summarization",
"token-classification",
"question-answering",
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument('-f' )
    args = parser.parse_args()
    return args.f
def get_results(output_dir , split="eval" ):
    '''simple docstring'''
    path = os.path.join(output_dir , f"""{split}_results.json""" )
    if os.path.exists(path ):
        with open(path , 'r' ) as f:
            return json.load(f )
    raise ValueError(f"""can't find {path}""" )
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class A( UpperCamelCase ):
'''simple docstring'''
def a__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
        with patch.object(sys , 'argv' , testargs ):
            run_flax_glue.main()
            result = get_results(tmp_dir )
self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )
@slow
def a__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
        with patch.object(sys , 'argv' , testargs ):
            run_clm_flax.main()
            result = get_results(tmp_dir )
self.assertLess(result['eval_perplexity'] , 100 )
@slow
def a__ ( self : str ) -> Tuple:
"""simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
        with patch.object(sys , 'argv' , testargs ):
            run_summarization_flax.main()
            result = get_results(tmp_dir , split='test' )
self.assertGreaterEqual(result['test_rouge1'] , 10 )
self.assertGreaterEqual(result['test_rouge2'] , 2 )
self.assertGreaterEqual(result['test_rougeL'] , 7 )
self.assertGreaterEqual(result['test_rougeLsum'] , 7 )
@slow
def a__ ( self : Optional[int] ) -> str:
"""simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
        with patch.object(sys , 'argv' , testargs ):
            run_mlm_flax.main()
            result = get_results(tmp_dir )
self.assertLess(result['eval_perplexity'] , 42 )
@slow
def a__ ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
        with patch.object(sys , 'argv' , testargs ):
            run_ta_mlm_flax.main()
            result = get_results(tmp_dir )
self.assertGreaterEqual(result['eval_accuracy'] , 0.42 )
@slow
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
        with patch.object(sys , 'argv' , testargs ):
            run_flax_ner.main()
            result = get_results(tmp_dir )
self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )
self.assertGreaterEqual(result['eval_f1'] , 0.3 )
@slow
def a__ ( self : str ) -> int:
"""simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
        with patch.object(sys , 'argv' , testargs ):
            run_qa.main()
            result = get_results(tmp_dir )
self.assertGreaterEqual(result['eval_f1'] , 30 )
self.assertGreaterEqual(result['eval_exact'] , 30 )
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs ):
    '''simple docstring'''
    # lock on this script file itself so prints from concurrent processes don't interleave
    with open(__file__ , 'r' ) as fh:
        fcntl.flock(fh , fcntl.LOCK_EX )
        try:
            print(*msgs )
        finally:
            fcntl.flock(fh , fcntl.LOCK_UN )
local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()
gpu = F"""[{hostname}-{local_rank}]"""
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(F"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(F"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(F"""{gpu} is broken""")
raise
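
# Illustrative output of a healthy 2-process run (hostnames and versions will differ):
#   [node1-0] is OK (global rank: 0/2)
#   [node1-1] is OK (global rank: 1/2)
#   pt=2.0.1, cuda=11.8, nccl=(2, 14, 3)   <- printed by rank 0 only
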
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    '''simple docstring'''
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None
CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")
def distribute_coins(root: TreeNode | None ) -> int:
    '''simple docstring'''
    if root is None:
        return 0
    # Validation
    def count_nodes(node: TreeNode | None ) -> int:
        if node is None:
            return 0
        return count_nodes(node.left ) + count_nodes(node.right ) + 1
    def count_coins(node: TreeNode | None ) -> int:
        if node is None:
            return 0
        return count_coins(node.left ) + count_coins(node.right ) + node.data
    if count_nodes(root ) != count_coins(root ):
        raise ValueError('The nodes number should be same as the number of coins' )
    # Main calculation
    def get_distrib(node: TreeNode | None ) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0 , 1 )
        left_distrib_moves , left_distrib_excess = get_distrib(node.left )
        right_distrib_moves , right_distrib_excess = get_distrib(node.right )
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left )
            + abs(coins_to_right )
        )
        result_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(result_moves , result_excess )
    return get_distrib(root )[0]
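# Worked example (a sketch, not in the original file): the root passes one coin to
# each empty leaf, costing one move per edge.
def _distribute_coins_example() -> int:
    tree = TreeNode(3 , TreeNode(0 ) , TreeNode(0 ) )
    return distribute_coins(tree )  # == 2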
if __name__ == "__main__":
import doctest
doctest.testmod()
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_nllb_moe": [
"NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
"NllbMoeConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nllb_moe"] = [
"NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
"NllbMoeForConditionalGeneration",
"NllbMoeModel",
"NllbMoePreTrainedModel",
"NllbMoeTop2Router",
"NllbMoeSparseMLP",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from manim import *
class A( UpperCamelCase ):
'''simple docstring'''
def a__ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = Rectangle(height=0.5 , width=0.5 )
lowerCamelCase_ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
lowerCamelCase_ = Rectangle(height=0.25 , width=0.25 )
lowerCamelCase_ = [mem.copy() for i in range(6 )]
lowerCamelCase_ = [mem.copy() for i in range(6 )]
lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = VGroup(A_ , A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = Text('CPU' , font_size=24 )
lowerCamelCase_ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(A_ )
lowerCamelCase_ = [mem.copy() for i in range(4 )]
lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = Text('GPU' , font_size=24 )
lowerCamelCase_ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
gpu.move_to([-1, -1, 0] )
self.add(A_ )
lowerCamelCase_ = [mem.copy() for i in range(6 )]
lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = Text('Model' , font_size=24 )
lowerCamelCase_ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
model.move_to([3, -1.0, 0] )
self.add(A_ )
lowerCamelCase_ = []
lowerCamelCase_ = []
for i, rect in enumerate(A_ ):
lowerCamelCase_ = fill.copy().set_fill(A_ , opacity=0.8 )
target.move_to(A_ )
model_arr.append(A_ )
lowerCamelCase_ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(A_ , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(A_ )
self.add(*A_ , *A_ )
lowerCamelCase_ = [meta_mem.copy() for i in range(6 )]
lowerCamelCase_ = [meta_mem.copy() for i in range(6 )]
lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = VGroup(A_ , A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = Text('Disk' , font_size=24 )
lowerCamelCase_ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
disk.move_to([-4, -1.25, 0] )
self.add(A_ , A_ )
lowerCamelCase_ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCamelCase_ = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(A_ , A_ )
lowerCamelCase_ = MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(A_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(A_ )
lowerCamelCase_ = MarkupText(
f"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(A_ ) )
lowerCamelCase_ = Square(0.3 )
input.set_fill(A_ , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , A_ , buff=0.5 )
self.play(Write(A_ ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=A_ , buff=0.02 )
self.play(MoveToTarget(A_ ) )
self.play(FadeOut(A_ ) )
lowerCamelCase_ = Arrow(start=A_ , end=A_ , color=A_ , buff=0.5 )
a.next_to(model_arr[0].get_left() , A_ , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
lowerCamelCase_ = MarkupText(
f"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(A_ , run_time=3 ) )
lowerCamelCase_ = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(A_ ) , Circumscribe(model_arr[0] , color=A_ , **A_ ) , Circumscribe(model_cpu_arr[0] , color=A_ , **A_ ) , Circumscribe(gpu_rect[0] , color=A_ , **A_ ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
lowerCamelCase_ = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , A_ , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
lowerCamelCase_ = AnimationGroup(
FadeOut(A_ , run_time=0.5 ) , MoveToTarget(A_ , run_time=0.5 ) , FadeIn(A_ , run_time=0.5 ) , lag_ratio=0.2 )
self.play(A_ )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
lowerCamelCase_ = 0.7
self.play(
Circumscribe(model_arr[i] , **A_ ) , Circumscribe(cpu_left_col_base[i] , **A_ ) , Circumscribe(cpu_left_col_base[i + 1] , color=A_ , **A_ ) , Circumscribe(gpu_rect[0] , color=A_ , **A_ ) , Circumscribe(model_arr[i + 1] , color=A_ , **A_ ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=A_ , **A_ ) , Circumscribe(cpu_left_col_base[-1] , color=A_ , **A_ ) , Circumscribe(gpu_rect[0] , color=A_ , **A_ ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
lowerCamelCase_ = a_c
lowerCamelCase_ = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(A_ ) , FadeOut(A_ , run_time=0.5 ) , )
lowerCamelCase_ = MarkupText(f"""Inference on a model too large for GPU memory\nis successfully completed.""" , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(A_ , run_time=3 ) , MoveToTarget(A_ ) )
self.wait()
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline( DiffusionPipeline ):
    '''simple docstring'''
    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler
    def __init__( self , unet: UNet2DModel , scheduler: ScoreSdeVeScheduler ) -> None:
        """simple docstring"""
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
    @torch.no_grad()
    def __call__( self , batch_size: int = 1 , num_inference_steps: int = 2000 , generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , **kwargs , ) -> Union[ImagePipelineOutput, Tuple]:
        """simple docstring"""
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        sample = randn_tensor(shape , generator=generator ) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device )
        self.scheduler.set_timesteps(num_inference_steps )
        self.scheduler.set_sigmas(num_inference_steps )
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
            # correction step
            for _ in range(self.scheduler.config.correct_steps ):
                model_output = self.unet(sample , sigma_t ).sample
                sample = self.scheduler.step_correct(model_output , sample , generator=generator ).prev_sample
            # prediction step
            model_output = model(sample , sigma_t ).sample
            output = self.scheduler.step_pred(model_output , t , sample , generator=generator )
            sample , sample_mean = output.prev_sample, output.prev_sample_mean
        sample = sample_mean.clamp(0 , 1 )
        sample = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample )
        if not return_dict:
            return (sample,)
        return ImagePipelineOutput(images=sample )
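
# Usage sketch (the checkpoint id is illustrative, not taken from this file):
#     pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
#     image = pipe(num_inference_steps=2000).images[0]
#     image.save("sde_ve_sample.png")
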
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def info_command_factory(_ ):
    '''simple docstring'''
    return EnvironmentCommand()
class EnvironmentCommand( BaseDiffusersCLICommand ):
    '''simple docstring'''
    @staticmethod
    def register_subcommand( parser : ArgumentParser ) -> None:
        """simple docstring"""
        download_parser = parser.add_parser('env' )
        download_parser.set_defaults(func=info_command_factory )
    def run( self ) -> dict:
        """simple docstring"""
        hub_version = huggingface_hub.__version__
        pt_version = 'not installed'
        pt_cuda_available = 'NA'
        if is_torch_available():
            import torch
            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()
        transformers_version = 'not installed'
        if is_transformers_available():
            import transformers
            transformers_version = transformers.__version__
        accelerate_version = 'not installed'
        if is_accelerate_available():
            import accelerate
            accelerate_version = accelerate.__version__
        xformers_version = 'not installed'
        if is_xformers_available():
            import xformers
            xformers_version = xformers.__version__
        info = {
            '`diffusers` version': version,
            'Platform': platform.platform(),
            'Python version': platform.python_version(),
            'PyTorch version (GPU?)': f"""{pt_version} ({pt_cuda_available})""",
            'Huggingface_hub version': hub_version,
            'Transformers version': transformers_version,
            'Accelerate version': accelerate_version,
            'xFormers version': xformers_version,
            'Using GPU in script?': '<fill in>',
            'Using distributed or parallel set-up in script?': '<fill in>',
        }
        print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n' )
        print(self.format_dict(info ) )
        return info
    @staticmethod
    def format_dict( d ) -> str:
        """simple docstring"""
        return "\n".join([f"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
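
# Example `diffusers-cli env` output produced by run() above (all values illustrative):
#     - `diffusers` version: 0.16.1
#     - Platform: Linux-5.15.0-x86_64-with-glibc2.31
#     - Python version: 3.10.11
#     - PyTorch version (GPU?): 2.0.1 (True)
#     - Huggingface_hub version: 0.14.1
#     - Transformers version: 4.29.2
#     - Accelerate version: 0.19.0
#     - xFormers version: not installed
#     - Using GPU in script?: <fill in>
#     - Using distributed or parallel set-up in script?: <fill in>
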
def check_cycle(graph: dict ) -> bool:
    '''simple docstring'''
    visited: set = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set = set()
    return any(
        node not in visited and depth_first_search(graph , node , visited , rec_stk )
        for node in graph )
def depth_first_search(graph: dict , vertex: int , visited: set , rec_stk: set ) -> bool:
    '''simple docstring'''
    visited.add(vertex )
    rec_stk.add(vertex )
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph , node , visited , rec_stk ):
                return True
        elif node in rec_stk:
            return True
    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex )
    return False
if __name__ == "__main__":
from doctest import testmod
testmod()
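
# Examples: the back edge 2 -> 0 creates a cycle; a plain chain does not.
#     check_cycle({0: [1], 1: [2], 2: [0]})  # True
#     check_cycle({0: [1], 1: [2], 2: []})   # False
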
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int , den: int ) -> bool:
    '''simple docstring'''
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )
def fraction_list(digit_len: int ) -> list[str]:
    '''simple docstring'''
    solutions = []
    den = 11
    last_digit = int('1' + '0' * digit_len )
    for num in range(den , last_digit ):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num , den ):
                    solutions.append(f"""{num}/{den}""" )
            den += 1
        num += 1
        den = 10
    return solutions
def solution(n: int = 2 ) -> int:
    '''simple docstring'''
    result = 1.0
    for fraction in fraction_list(n ):
        frac = Fraction(fraction )
        result *= frac.denominator / frac.numerator
    return int(result )
if __name__ == "__main__":
print(solution())
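
# For the default two-digit case the four non-trivial digit-cancelling fractions are
# 16/64, 19/95, 26/65 and 49/98; their product reduces to 1/100, so solution()
# returns 100 (Project Euler problem 33).
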
__author__ = "Alexander Joslin"
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str ) -> int:
    '''simple docstring'''
    operators = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub}
    operand_stack = Stack()
    operator_stack = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i ) )
        elif i in operators:
            # RULE 2
            operator_stack.push(i )
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num2 , num1 )
            operand_stack.push(total )
    # RULE 5
    return operand_stack.peek()
if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
# answer = 45
print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class LevitImageProcessor( BaseImageProcessor ):
    '''simple docstring'''
    model_input_names = ['''pixel_values''']
    def __init__( self , do_resize: bool = True , size: Dict[str, int] = None , resample: PILImageResampling = PILImageResampling.BICUBIC , do_center_crop: bool = True , crop_size: Dict[str, int] = None , do_rescale: bool = True , rescale_factor: Union[int, float] = 1 / 255 , do_normalize: bool = True , image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(**kwargs )
        size = size if size is not None else {'shortest_edge': 224}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size , param_name='crop_size' )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize( self , image: np.ndarray , size: Dict[str, int] , resample: PILImageResampling = PILImageResampling.BICUBIC , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        """simple docstring"""
        size_dict = get_size_dict(size , default_to_square=False )
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size['shortest_edge'] )
            output_size = get_resize_output_image_size(image , size=shortest_edge , default_to_square=False )
            size_dict = {'height': output_size[0], 'width': output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""" )
        return resize(
            image , size=(size_dict['height'], size_dict['width']) , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image: np.ndarray , size: Dict[str, int] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        """simple docstring"""
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""" )
        return center_crop(image , size=(size['height'], size['width']) , data_format=data_format , **kwargs )
    def rescale( self , image: np.ndarray , scale: Union[int, float] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        """simple docstring"""
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image: np.ndarray , mean: Union[float, List[float]] , std: Union[float, List[float]] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        """simple docstring"""
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images: ImageInput , do_resize: Optional[bool] = None , size: Optional[Dict[str, int]] = None , resample: PILImageResampling = None , do_center_crop: Optional[bool] = None , crop_size: Optional[Dict[str, int]] = None , do_rescale: Optional[bool] = None , rescale_factor: Optional[float] = None , do_normalize: Optional[bool] = None , image_mean: Optional[Union[float, Iterable[float]]] = None , image_std: Optional[Union[float, Iterable[float]]] = None , return_tensors: Optional[TensorType] = None , data_format: ChannelDimension = ChannelDimension.FIRST , **kwargs , ) -> BatchFeature:
        """simple docstring"""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name='crop_size' )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.' )
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.' )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image , size , resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image , crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image , rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image , image_mean , image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
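
# Usage sketch (not part of the original file; assumes PIL and torch are installed):
#     from PIL import Image
#     processor = LevitImageProcessor()
#     inputs = processor.preprocess(Image.open("cat.png"), return_tensors="pt")
#     inputs["pixel_values"].shape  # torch.Size([1, 3, 224, 224]) with the default crop_size
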
import pytest
DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__"
DATASET_LOADING_SCRIPT_CODE = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n    def _info(self):\n        features = datasets.Features(\n            {\n                \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n                \"ner_tags\": datasets.Sequence(\n                    datasets.features.ClassLabel(\n                        names=[\n                            \"O\",\n                            \"B-PER\",\n                            \"I-PER\",\n                            \"B-ORG\",\n                            \"I-ORG\",\n                            \"B-LOC\",\n                            \"I-LOC\",\n                        ]\n                    )\n                ),\n                \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n                \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n            }\n        )\n        return datasets.DatasetInfo(features=features)\n\n    def _split_generators(self, dl_manager):\n        dl_path = dl_manager.download(URLS)\n        return [\n            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n        ]\n\n    def _generate_examples(self, filepath):\n        with open(filepath, \"r\", encoding=\"utf-8\") as f:\n            for i, line in enumerate(f):\n                yield i, json.loads(line)\n"
@pytest.fixture
def dataset_loading_script_name():
    '''simple docstring'''
    return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def dataset_loading_script_code():
    '''simple docstring'''
    return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name , dataset_loading_script_code , tmp_path ):
    '''simple docstring'''
    script_name = dataset_loading_script_name
    script_dir = tmp_path / 'datasets' / script_name
    script_dir.mkdir(parents=True )
    script_path = script_dir / f"""{script_name}.py"""
    with open(script_path , 'w' ) as f:
        f.write(dataset_loading_script_code )
    return str(script_path )
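
# These fixtures compose: a test requesting `dataset_loading_script_dir` receives a
# temporary directory laid out as datasets/__dummy_dataset1__/__dummy_dataset1__.py
# containing the loader source above.
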
import cv2
import numpy as np
class HarrisCorner:
    '''simple docstring'''
    def __init__( self , k : float , window_size : int ) -> None:
        """simple docstring"""
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError('invalid k value' )
    def __str__( self ) -> str:
        """simple docstring"""
        return str(self.k )
    def detect( self , img_path : str ) -> tuple[cv2.Mat, list[list[int]]]:
        """simple docstring"""
        img = cv2.imread(img_path , 0 )
        h , w = img.shape
        corner_list = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img , cv2.COLOR_GRAY2RGB )
        dy , dx = np.gradient(img )
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset , h - offset ):
            for x in range(offset , w - offset ):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r] )
                    color_img.itemset((y, x, 0) , 0 )
                    color_img.itemset((y, x, 1) , 0 )
                    color_img.itemset((y, x, 2) , 255 )
        return color_img, corner_list
if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img , _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
"umberto-commoncrawl-cased-v1": (
"https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
),
"umberto-wikipedia-uncased-v1": (
"https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
),
}
class CamembertConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''camembert'''
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class CamembertOnnxConfig( OnnxConfig ):
    '''simple docstring'''
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    '''simple docstring'''
    bs = (
        list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
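
# Illustration: printable bytes map to themselves (b"A" -> "A"); bytes outside the
# printable ranges are shifted past 255. The space byte 32 is the 33rd such byte,
# so it maps to chr(256 + 32) = 'Ġ' -- the familiar marker in GPT-2 style vocabularies.
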
def get_pairs(word ):
    '''simple docstring'''
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class BlenderbotTokenizer( PreTrainedTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
def __init__( self : Optional[Any] , A_ : List[Any] , A_ : List[Any] , A_ : Union[str, Any]="replace" , A_ : Dict="<s>" , A_ : Optional[int]="</s>" , A_ : Optional[Any]="</s>" , A_ : Dict="<s>" , A_ : Dict="<unk>" , A_ : Any="<pad>" , A_ : Dict="<mask>" , A_ : Union[str, Any]=False , **A_ : List[str] , ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else bos_token
lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else eos_token
lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else sep_token
lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else cls_token
lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else unk_token
lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token
super().__init__(
errors=A_ , bos_token=A_ , eos_token=A_ , unk_token=A_ , sep_token=A_ , cls_token=A_ , pad_token=A_ , mask_token=A_ , add_prefix_space=A_ , **A_ , )
with open(A_ , encoding='utf-8' ) as vocab_handle:
lowerCamelCase_ = json.load(A_ )
lowerCamelCase_ = {v: k for k, v in self.encoder.items()}
lowerCamelCase_ = errors # how to handle errors in decoding
lowerCamelCase_ = bytes_to_unicode()
lowerCamelCase_ = {v: k for k, v in self.byte_encoder.items()}
with open(A_ , encoding='utf-8' ) as merges_handle:
lowerCamelCase_ = merges_handle.read().split('\n' )[1:-1]
lowerCamelCase_ = [tuple(merge.split() ) for merge in bpe_merges]
lowerCamelCase_ = dict(zip(A_ , range(len(A_ ) ) ) )
lowerCamelCase_ = {}
lowerCamelCase_ = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowerCamelCase_ = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def a__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
return len(self.encoder )
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def a__ ( self : Tuple , A_ : Tuple ) -> Optional[Any]:
"""simple docstring"""
if token in self.cache:
return self.cache[token]
lowerCamelCase_ = tuple(A_ )
lowerCamelCase_ = get_pairs(A_ )
if not pairs:
return token
while True:
lowerCamelCase_ = min(A_ , key=lambda A_ : self.bpe_ranks.get(A_ , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
lowerCamelCase_ , lowerCamelCase_ = bigram
lowerCamelCase_ = []
lowerCamelCase_ = 0
while i < len(A_ ):
try:
lowerCamelCase_ = word.index(A_ , A_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCamelCase_ = j
if word[i] == first and i < len(A_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCamelCase_ = tuple(A_ )
lowerCamelCase_ = new_word
if len(A_ ) == 1:
break
else:
lowerCamelCase_ = get_pairs(A_ )
lowerCamelCase_ = ' '.join(A_ )
lowerCamelCase_ = word
return word
def a__ ( self : str , A_ : List[str] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = []
for token in re.findall(self.pat , A_ ):
lowerCamelCase_ = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(A_ ).split(' ' ) )
return bpe_tokens
def a__ ( self : Tuple , A_ : str ) -> Optional[Any]:
"""simple docstring"""
return self.encoder.get(A_ , self.encoder.get(self.unk_token ) )
def a__ ( self : Tuple , A_ : Dict ) -> List[Any]:
"""simple docstring"""
return self.decoder.get(A_ )
def a__ ( self : Optional[int] , A_ : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = ''.join(A_ )
lowerCamelCase_ = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
return text
def a__ ( self : Tuple , A_ : str , A_ : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(A_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCamelCase_ = os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
lowerCamelCase_ = os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(A_ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=A_ , ensure_ascii=A_ ) + '\n' )
lowerCamelCase_ = 0
with open(A_ , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
' Please check that the tokenizer is not corrupted!' )
lowerCamelCase_ = token_index
writer.write(' '.join(A_ ) + '\n' )
index += 1
return vocab_file, merge_file
def a__ ( self : str , A_ : List[int] , A_ : Optional[List[int]] = None , A_ : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ )
if token_ids_a is None:
return [1] + ([0] * len(A_ )) + [1]
return [1] + ([0] * len(A_ )) + [1, 1] + ([0] * len(A_ )) + [1]
def a__ ( self : int , A_ : List[int] , A_ : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def a__ ( self : str , A_ : Optional[Any] , A_ : Union[str, Any]=False , **A_ : List[str] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = kwargs.pop('add_prefix_space' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(A_ ) > 0 and not text[0].isspace()):
lowerCamelCase_ = ' ' + text
return (text, kwargs)
def a__ ( self : List[Any] , A_ : List[int] , A_ : Optional[List[int]] = None ) -> Dict:
"""simple docstring"""
return token_ids_a + [self.eos_token_id]
def a__ ( self : Optional[int] , A_ : "Conversation" ) -> List[int]:
"""simple docstring"""
lowerCamelCase_ = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(' ' + text )
else:
# Generated responses should contain them already.
inputs.append(A_ )
lowerCamelCase_ = ' '.join(A_ )
lowerCamelCase_ = self.encode(A_ )
if len(A_ ) > self.model_max_length:
lowerCamelCase_ = input_ids[-self.model_max_length :]
logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" )
return input_ids
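
# Note: unlike BERT-style tokenizers, the build-inputs method above only appends the
# EOS token -- nothing is prepended -- so every encoded sequence simply ends with </s>.
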
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = ''''''
UpperCamelCase = '''hf-legacy''' # "hf://"" is reserved for hffs
def __init__( self : str , A_ : Optional[DatasetInfo] = None , A_ : Optional[str] = None , **A_ : Tuple , ) -> int:
"""simple docstring"""
super().__init__(self , **A_ )
lowerCamelCase_ = repo_info
lowerCamelCase_ = token
lowerCamelCase_ = None
def a__ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
if self.dir_cache is None:
lowerCamelCase_ = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
lowerCamelCase_ = {
'name': hf_file.rfilename,
'size': None,
'type': 'file',
}
self.dir_cache.update(
{
str(A_ ): {'name': str(A_ ), 'size': None, 'type': 'directory'}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def a__ ( self : str , A_ : str , A_ : str = "rb" , **A_ : Any , ) -> Union[str, Any]:
"""simple docstring"""
if not isinstance(self.repo_info , A_ ):
raise NotImplementedError(f"""Open is only implemented for dataset repositories, but got {self.repo_info}""" )
lowerCamelCase_ = hf_hub_url(self.repo_info.id , A_ , revision=self.repo_info.sha )
return fsspec.open(
A_ , mode=A_ , headers=get_authentication_headers_for_url(A_ , use_auth_token=self.token ) , client_kwargs={'trust_env': True} , ).open()
def a__ ( self : Union[str, Any] , A_ : str , **A_ : Any ) -> str:
"""simple docstring"""
self._get_dirs()
lowerCamelCase_ = self._strip_protocol(A_ )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(A_ )
def a__ ( self : Union[str, Any] , A_ : Any , A_ : Tuple=False , **A_ : Optional[Any] ) -> str:
"""simple docstring"""
self._get_dirs()
lowerCamelCase_ = PurePosixPath(path.strip('/' ) )
lowerCamelCase_ = {}
for p, f in self.dir_cache.items():
lowerCamelCase_ = PurePosixPath(p.strip('/' ) )
lowerCamelCase_ = p.parent
if root == path:
lowerCamelCase_ = f
lowerCamelCase_ = list(paths.values() )
if detail:
return out
else:
return sorted(f['name'] for f in out )
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class LanguageModeling( TaskTemplate ):
    '''simple docstring'''
    task: str = field(default='''language-modeling''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
    input_schema: ClassVar[Features] = Features({'''text''': Value('''string''' )} )
    label_schema: ClassVar[Features] = Features({} )
    text_column: str = "text"
    @property
    def column_mapping( self ) -> Dict[str, str]:
        """simple docstring"""
        return {self.text_column: "text"}
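
# Example: pointing the template at a differently named column.
#     LanguageModeling(text_column="content").column_mapping  # {"content": "text"}
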
def print_max_activities(start: list[int] , finish: list[int] ) -> None:
    '''simple docstring'''
    n = len(finish )
    print('The following activities are selected:' )
    # The first activity is always selected
    i = 0
    print(i , end=',' )
    # Consider rest of the activities
    for j in range(n ):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j , end=',' )
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
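
# Expected output of the demo above:
#   The following activities are selected:
#   0,1,3,4,
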
def _SCREAMING_SNAKE_CASE ( min_val : int = 10 , max_val : int = 1000 , option : bool = True ):
    '''simple docstring'''
    assert (
        isinstance(min_val , int )
        and isinstance(max_val , int )
        and isinstance(option , bool )
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError('Invalid value for min_val or max_val (min_value < max_value)' )
    return min_val if option else max_val
def get_avg(number_1 : int , number_2 : int ) -> int:
    '''simple docstring'''
    return int((number_1 + number_2) / 2 )
def guess_the_number(lower : int , higher : int , to_guess : int ) -> None:
    '''simple docstring'''
    assert (
        isinstance(lower , int ) and isinstance(higher , int ) and isinstance(to_guess , int )
    ), 'argument values must be type of "int"'
    if lower > higher:
        raise ValueError('argument value for lower and higher must be(lower > higher)' )
    if not lower < to_guess < higher:
        raise ValueError(
            'guess value must be within the range of lower and higher value' )
    def answer(number : int ) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"
    print('started...' )
    last_lowest = lower
    last_highest = higher
    last_numbers = []
    while True:
        number = get_avg(last_lowest , last_highest )
        last_numbers.append(number )
        if answer(number ) == "low":
            last_lowest = number
        elif answer(number ) == "high":
            last_highest = number
        else:
            break
    print(f"""guess the number : {last_numbers[-1]}""" )
    print(f"""details : {last_numbers!s}""" )
def main() -> None:
    '''simple docstring'''
    lower = int(input('Enter lower value : ' ).strip() )
    higher = int(input('Enter high value : ' ).strip() )
    guess = int(input('Enter value to guess : ' ).strip() )
    guess_the_number(lower , higher , guess )
if __name__ == "__main__":
main()
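
# Example trace (non-interactive): guess_the_number(-10, 10, 7) narrows through the
# midpoints 0 -> 5 -> 7 and prints
#   started...
#   guess the number : 7
#   details : [0, 5, 7]
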
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A:
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , image_size=32 , patch_size=2 , num_channels=3 , embed_dim=16 , hidden_sizes=[32, 64, 128] , depths=[1, 2, 1] , num_heads=[2, 2, 4] , window_size=2 , mlp_ratio=2.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , patch_norm=True , initializer_range=0.02 , layer_norm_eps=1E-5 , is_training=True , scope=None , use_labels=True , type_sequence_label_size=10 , encoder_stride=8 , out_features=["stage1", "stage2"] , out_indices=[1, 2] , ) -> None:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
def a__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = self.get_config()
return config, pixel_values, labels
def a__ ( self : List[Any] ) -> Any:
"""simple docstring"""
return FocalNetConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def a__ ( self : Union[str, Any] , A_ : Dict , A_ : int , A_ : Optional[int] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = FocalNetModel(config=A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = model(A_ )
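        # Each of the len(depths) - 1 patch-merging stages quarters the token count and doubles the channel dimension.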
lowerCamelCase_ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowerCamelCase_ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def a__ ( self : Tuple , A_ : List[str] , A_ : Optional[int] , A_ : Optional[Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = FocalNetBackbone(config=A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = model(A_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
lowerCamelCase_ = None
lowerCamelCase_ = FocalNetBackbone(config=A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = model(A_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def a__ ( self : int , A_ : Optional[Any] , A_ : Optional[int] , A_ : Any ) -> Any:
"""simple docstring"""
lowerCamelCase_ = FocalNetForMaskedImageModeling(config=A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = model(A_ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCamelCase_ = 1
lowerCamelCase_ = FocalNetForMaskedImageModeling(A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase_ = model(A_ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def a__ ( self : Tuple , A_ : List[Any] , A_ : int , A_ : Dict ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = self.type_sequence_label_size
lowerCamelCase_ = FocalNetForImageClassification(A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = model(A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCamelCase_ = 1
lowerCamelCase_ = FocalNetForImageClassification(A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase_ = model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a__ ( self : int ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ = self.prepare_config_and_inputs()
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = config_and_inputs
lowerCamelCase_ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class A( UpperCamelCase , UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
UpperCamelCase = (
{'''feature-extraction''': FocalNetModel, '''image-classification''': FocalNetForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = FocalNetModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=A_ , embed_dim=37 , has_text_modality=A_ )
def a__ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__ ( self : Any ) -> Optional[int]:
"""simple docstring"""
return
def a__ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def a__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*A_ )
def a__ ( self : Dict ) -> int:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*A_ )
def a__ ( self : List[str] ) -> Any:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_ )
@unittest.skip(reason='FocalNet does not use inputs_embeds' )
def a__ ( self : int ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason='FocalNet does not use feedforward chunking' )
def a__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
pass
def a__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowerCamelCase_ = model_class(A_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A_ , nn.Linear ) )
def a__ ( self : Any ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowerCamelCase_ = model_class(A_ )
lowerCamelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase_ = [*signature.parameters.keys()]
lowerCamelCase_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , A_ )
def a__ ( self : int , A_ : List[Any] , A_ : int , A_ : Dict , A_ : Dict ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = model_class(A_ )
model.to(A_ )
model.eval()
with torch.no_grad():
lowerCamelCase_ = model(**self._prepare_for_class(A_ , A_ ) )
lowerCamelCase_ = outputs.hidden_states
lowerCamelCase_ = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(A_ ) , A_ )
# FocalNet has a different seq_length
lowerCamelCase_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCamelCase_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
lowerCamelCase_ = outputs.reshaped_hidden_states
self.assertEqual(len(A_ ) , A_ )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = reshaped_hidden_states[0].shape
lowerCamelCase_ = (
reshaped_hidden_states[0].view(A_ , A_ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def a__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
lowerCamelCase_ = True
self.check_hidden_states_output(A_ , A_ , A_ , A_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase_ = True
self.check_hidden_states_output(A_ , A_ , A_ , A_ )
def a__ ( self : List[str] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = 3
lowerCamelCase_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowerCamelCase_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCamelCase_ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowerCamelCase_ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
lowerCamelCase_ = True
self.check_hidden_states_output(A_ , A_ , A_ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase_ = True
self.check_hidden_states_output(A_ , A_ , A_ , (padded_height, padded_width) )
@slow
def a__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = FocalNetModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def a__ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = _config_zero_init(A_ )
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(config=A_ )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class A( unittest.TestCase ):
'''simple docstring'''
@cached_property
def a__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
return AutoImageProcessor.from_pretrained('microsoft/focalnet-tiny' ) if is_vision_available() else None
@slow
def a__ ( self : Tuple ) -> Any:
"""simple docstring"""
lowerCamelCase_ = FocalNetForImageClassification.from_pretrained('microsoft/focalnet-tiny' ).to(A_ )
lowerCamelCase_ = self.default_image_processor
lowerCamelCase_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
lowerCamelCase_ = image_processor(images=A_ , return_tensors='pt' ).to(A_ )
# forward pass
with torch.no_grad():
lowerCamelCase_ = model(**A_ )
# verify the logits
lowerCamelCase_ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , A_ )
lowerCamelCase_ = torch.tensor([0.2166, -0.4368, 0.2191] ).to(A_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A_ , atol=1E-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class A( UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (FocalNetBackbone,) if is_torch_available() else ()
UpperCamelCase = FocalNetConfig
UpperCamelCase = False
def a__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = FocalNetModelTester(self )
| 651
| 1
|
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
lowerCamelCase : Optional[int] = 2
class A:
'''simple docstring'''
def __init__( self : Tuple , *, # begin keyword-only arguments
A_ : Optional[Any]="<s>" , A_ : Tuple="<pad>" , A_ : Any="</s>" , A_ : Optional[int]="<unk>" , A_ : int=None , ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = bos, unk, pad, eos
lowerCamelCase_ = []
lowerCamelCase_ = []
lowerCamelCase_ = {}
lowerCamelCase_ = self.add_symbol(A_ )
lowerCamelCase_ = self.add_symbol(A_ )
lowerCamelCase_ = self.add_symbol(A_ )
lowerCamelCase_ = self.add_symbol(A_ )
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(A_ )
lowerCamelCase_ = len(self.symbols )
def __eq__( self : str , A_ : Dict ) -> str:
"""simple docstring"""
return self.indices == other.indices
def __getitem__( self : Tuple , A_ : List[Any] ) -> Any:
"""simple docstring"""
if idx < len(self.symbols ):
return self.symbols[idx]
return self.unk_word
def __len__( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
return len(self.symbols )
def __contains__( self : Any , A_ : Dict ) -> Union[str, Any]:
"""simple docstring"""
return sym in self.indices
@classmethod
def a__ ( cls : Dict , A_ : List[str] ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ = cls()
d.add_from_file(A_ )
return d
def a__ ( self : Any , A_ : Tuple , A_ : Tuple=1 , A_ : Tuple=False ) -> Dict:
"""simple docstring"""
if word in self.indices and not overwrite:
lowerCamelCase_ = self.indices[word]
lowerCamelCase_ = self.count[idx] + n
return idx
else:
lowerCamelCase_ = len(self.symbols )
lowerCamelCase_ = idx
self.symbols.append(A_ )
self.count.append(A_ )
return idx
def a__ ( self : int , A_ : List[Any] ) -> Optional[int]:
"""simple docstring"""
return 0
def a__ ( self : Dict , A_ : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
if isinstance(A_ , A_ ):
try:
with open(A_ , 'r' , encoding='utf-8' ) as fd:
                    self.add_from_file(fd )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception('Incorrect encoding detected in {}, please rebuild the dataset'.format(A_ ) )
return
        lowerCamelCase_ = A_.readlines()
lowerCamelCase_ = self._load_meta(A_ )
for line in lines[indices_start_line:]:
try:
lowerCamelCase_ , lowerCamelCase_ = line.rstrip().rsplit(' ' , 1 )
if field == "#fairseq:overwrite":
lowerCamelCase_ = True
lowerCamelCase_ , lowerCamelCase_ = line.rsplit(' ' , 1 )
else:
lowerCamelCase_ = False
lowerCamelCase_ = int(A_ )
lowerCamelCase_ = line
if word in self and not overwrite:
raise RuntimeError(
'Duplicate word found when loading Dictionary: \'{}\'. '
'Duplicate words can overwrite earlier ones by adding the '
'#fairseq:overwrite flag at the end of the corresponding row '
'in the dictionary file. If using the Camembert model, please '
'download an updated copy of the model file.'.format(A_ ) )
self.add_symbol(A_ , n=A_ , overwrite=A_ )
except ValueError:
raise ValueError('Incorrect dictionary format, expected \'<token> <cnt> [flags]\'' )
def _SCREAMING_SNAKE_CASE ( lowercase : List[str] ):
'''simple docstring'''
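    # fairseq BPE marks word-internal pieces with a trailing "@@"; strip that marker and tag word-final pieces with "</w>" instead.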
lowerCamelCase_ = dict((re.sub(r'@@$' , '' , lowercase ), v) if k.endswith('@@' ) else (re.sub(r'$' , '</w>' , lowercase ), v) for k, v in d.items() )
lowerCamelCase_ = '<s> <pad> </s> <unk>'.split()
# restore the special tokens
for k in keep_keys:
del da[f"""{k}</w>"""]
lowerCamelCase_ = d[k] # restore
return da
def _SCREAMING_SNAKE_CASE ( lowercase : Optional[Any] , lowercase : Optional[int] ):
'''simple docstring'''
if not os.path.exists(lowercase ):
raise ValueError(f"""path {biogpt_checkpoint_path} does not exist!""" )
os.makedirs(lowercase , exist_ok=lowercase )
print(f"""Writing results to {pytorch_dump_folder_path}""" )
# handle various types of models
lowerCamelCase_ = os.path.join(lowercase , 'checkpoint.pt' )
if not os.path.isfile(lowercase ):
raise ValueError(f"""path to the file {checkpoint_file} does not exist!""" )
lowerCamelCase_ = torch.load(lowercase , map_location='cpu' )
lowerCamelCase_ = chkpt['cfg']['model']
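    # `args` holds the fairseq training hyperparameters, which are mapped onto the HF BioGptConfig fields below.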
# dicts
lowerCamelCase_ = os.path.join(lowercase , 'dict.txt' )
if not os.path.isfile(lowercase ):
raise ValueError(f"""path to the file {dict_file} does not exist!""" )
lowerCamelCase_ = Dictionary.load(lowercase )
lowerCamelCase_ = rewrite_dict_keys(src_dict.indices )
lowerCamelCase_ = len(lowercase )
lowerCamelCase_ = os.path.join(lowercase , VOCAB_FILES_NAMES['vocab_file'] )
print(f"""Generating {src_vocab_file} of {src_vocab_size} records""" )
with open(lowercase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(lowercase , ensure_ascii=lowercase , indent=lowercase ) )
# merges_file (bpecodes)
lowerCamelCase_ = os.path.join(lowercase , 'bpecodes' )
if not os.path.isfile(lowercase ):
raise ValueError(f"""path to the file {bpecodes_file} does not exist!""" )
lowerCamelCase_ = os.path.join(lowercase , VOCAB_FILES_NAMES['merges_file'] )
shutil.copyfile(lowercase , lowercase )
# model config
lowerCamelCase_ = os.path.join(lowercase , 'config.json' )
lowerCamelCase_ = {
'activation_dropout': args['activation_dropout'],
'architectures': ['BioGptForCausalLM'],
'attention_probs_dropout_prob': args['attention_dropout'],
'bos_token_id': 0,
'eos_token_id': 2,
'hidden_act': args['activation_fn'],
'hidden_dropout_prob': args['dropout'],
'hidden_size': args['decoder_embed_dim'],
'initializer_range': 0.02,
'intermediate_size': args['decoder_ffn_embed_dim'],
'layer_norm_eps': 1e-1_2,
'layerdrop': args['decoder_layerdrop'],
'max_position_embeddings': args['max_target_positions'],
'model_type': 'biogpt',
'num_attention_heads': args['decoder_attention_heads'],
'num_hidden_layers': args['decoder_layers'],
'pad_token_id': 1,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_decoder_input_output_embed'],
'vocab_size': src_vocab_size,
}
# good hparam defaults to start with
print(f"""Generating {biogpt_model_config_file}""" )
with open(lowercase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(lowercase , ensure_ascii=lowercase , indent=lowercase ) )
# tokenizer config
lowerCamelCase_ = os.path.join(lowercase , lowercase )
lowerCamelCase_ = {
'bos_token': '<s>',
'eos_token': '</s>',
'model_max_length': 10_24,
'pad_token': '<pad>',
'special_tokens_map_file': None,
'tokenizer_class': 'BioGptTokenizer',
'unk_token': '<unk>',
}
print(f"""Generating {biogpt_tokenizer_config_file}""" )
with open(lowercase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(lowercase , ensure_ascii=lowercase , indent=lowercase ) )
# model
lowerCamelCase_ = chkpt['model']
# remove unneeded keys
lowerCamelCase_ = [
'decoder.version',
]
for k in ignore_keys:
model_state_dict.pop(lowercase , lowercase )
lowerCamelCase_ = list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith('output_projection.weight' ):
lowerCamelCase_ = model_state_dict.pop(lowercase )
else:
lowerCamelCase_ = model_state_dict.pop(lowercase )
lowerCamelCase_ = BioGptConfig.from_pretrained(lowercase )
lowerCamelCase_ = BioGptForCausalLM(lowercase )
# check that it loads ok
model_new.load_state_dict(lowercase )
# save
lowerCamelCase_ = os.path.join(lowercase , lowercase )
print(f"""Generating {pytorch_weights_dump_path}""" )
torch.save(lowercase , lowercase )
print('Conversion is done!' )
if __name__ == "__main__":
lowerCamelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--biogpt_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
" bpecodes, etc."
),
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
lowerCamelCase : str = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 651
|
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class A( unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = MODEL_FOR_CAUSAL_LM_MAPPING
UpperCamelCase = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def a__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = pipeline(task='text-generation' , model='sshleifer/tiny-ctrl' , framework='pt' )
# Using `do_sample=False` to force deterministic output
lowerCamelCase_ = text_generator('This is a test' , do_sample=A_ )
self.assertEqual(
A_ , [
{
'generated_text': (
'This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'
' oscope. FiliFili@@'
)
}
] , )
lowerCamelCase_ = text_generator(['This is a test', 'This is a second test'] )
self.assertEqual(
A_ , [
[
{
'generated_text': (
'This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'
' oscope. FiliFili@@'
)
}
],
[
{
'generated_text': (
'This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'
' oscope. oscope. FiliFili@@'
)
}
],
] , )
lowerCamelCase_ = text_generator('This is a test' , do_sample=A_ , num_return_sequences=2 , return_tensors=A_ )
self.assertEqual(
A_ , [
{'generated_token_ids': ANY(A_ )},
{'generated_token_ids': ANY(A_ )},
] , )
lowerCamelCase_ = text_generator.model.config.eos_token_id
lowerCamelCase_ = '<pad>'
lowerCamelCase_ = text_generator(
['This is a test', 'This is a second test'] , do_sample=A_ , num_return_sequences=2 , batch_size=2 , return_tensors=A_ , )
self.assertEqual(
A_ , [
[
{'generated_token_ids': ANY(A_ )},
{'generated_token_ids': ANY(A_ )},
],
[
{'generated_token_ids': ANY(A_ )},
{'generated_token_ids': ANY(A_ )},
],
] , )
@require_tf
def a__ ( self : Optional[int] ) -> str:
"""simple docstring"""
lowerCamelCase_ = pipeline(task='text-generation' , model='sshleifer/tiny-ctrl' , framework='tf' )
# Using `do_sample=False` to force deterministic output
lowerCamelCase_ = text_generator('This is a test' , do_sample=A_ )
self.assertEqual(
A_ , [
{
'generated_text': (
'This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'
' please,'
)
}
] , )
lowerCamelCase_ = text_generator(['This is a test', 'This is a second test'] , do_sample=A_ )
self.assertEqual(
A_ , [
[
{
'generated_text': (
'This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'
' please,'
)
}
],
[
{
'generated_text': (
'This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'
' Cannes 閲閲Cannes Cannes Cannes 攵 please,'
)
}
],
] , )
def a__ ( self : Optional[int] , A_ : Dict , A_ : int , A_ : List[str] ) -> str:
"""simple docstring"""
lowerCamelCase_ = TextGenerationPipeline(model=A_ , tokenizer=A_ )
return text_generator, ["This is a test", "Another test"]
def a__ ( self : Dict ) -> str:
"""simple docstring"""
lowerCamelCase_ = 'Hello I believe in'
lowerCamelCase_ = pipeline('text-generation' , model='hf-internal-testing/tiny-random-gpt2' )
lowerCamelCase_ = text_generator(A_ )
self.assertEqual(
A_ , [{'generated_text': 'Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'}] , )
lowerCamelCase_ = text_generator(A_ , stop_sequence=' fe' )
self.assertEqual(A_ , [{'generated_text': 'Hello I believe in fe'}] )
def a__ ( self : Any , A_ : Optional[Any] , A_ : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = text_generator.model
lowerCamelCase_ = text_generator.tokenizer
lowerCamelCase_ = text_generator('This is a test' )
self.assertEqual(A_ , [{'generated_text': ANY(A_ )}] )
self.assertTrue(outputs[0]['generated_text'].startswith('This is a test' ) )
lowerCamelCase_ = text_generator('This is a test' , return_full_text=A_ )
self.assertEqual(A_ , [{'generated_text': ANY(A_ )}] )
self.assertNotIn('This is a test' , outputs[0]['generated_text'] )
lowerCamelCase_ = pipeline(task='text-generation' , model=A_ , tokenizer=A_ , return_full_text=A_ )
lowerCamelCase_ = text_generator('This is a test' )
self.assertEqual(A_ , [{'generated_text': ANY(A_ )}] )
self.assertNotIn('This is a test' , outputs[0]['generated_text'] )
lowerCamelCase_ = text_generator('This is a test' , return_full_text=A_ )
self.assertEqual(A_ , [{'generated_text': ANY(A_ )}] )
self.assertTrue(outputs[0]['generated_text'].startswith('This is a test' ) )
lowerCamelCase_ = text_generator(['This is great !', 'Something else'] , num_return_sequences=2 , do_sample=A_ )
self.assertEqual(
A_ , [
[{'generated_text': ANY(A_ )}, {'generated_text': ANY(A_ )}],
[{'generated_text': ANY(A_ )}, {'generated_text': ANY(A_ )}],
] , )
if text_generator.tokenizer.pad_token is not None:
lowerCamelCase_ = text_generator(
['This is great !', 'Something else'] , num_return_sequences=2 , batch_size=2 , do_sample=A_ )
self.assertEqual(
A_ , [
[{'generated_text': ANY(A_ )}, {'generated_text': ANY(A_ )}],
[{'generated_text': ANY(A_ )}, {'generated_text': ANY(A_ )}],
] , )
with self.assertRaises(A_ ):
lowerCamelCase_ = text_generator('test' , return_full_text=A_ , return_text=A_ )
with self.assertRaises(A_ ):
lowerCamelCase_ = text_generator('test' , return_full_text=A_ , return_tensors=A_ )
with self.assertRaises(A_ ):
lowerCamelCase_ = text_generator('test' , return_text=A_ , return_tensors=A_ )
        # Empty prompt is slightly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
lowerCamelCase_ = text_generator('' )
self.assertEqual(A_ , [{'generated_text': ANY(A_ )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
lowerCamelCase_ = text_generator('' )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
lowerCamelCase_ = ['RwkvForCausalLM', 'XGLMForCausalLM', 'GPTNeoXForCausalLM']
if (
tokenizer.model_max_length < 10000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator('This is a test' * 500 , max_new_tokens=20 )
lowerCamelCase_ = text_generator('This is a test' * 500 , handle_long_generation='hole' , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(A_ ):
text_generator(
'This is a test' * 500 , handle_long_generation='hole' , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def a__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
import torch
# Classic `model_kwargs`
lowerCamelCase_ = pipeline(
            model='hf-internal-testing/tiny-random-bloom' , model_kwargs={'device_map': 'auto', 'torch_dtype': torch.bfloat16} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloat16 )
lowerCamelCase_ = pipe('This is a test' )
self.assertEqual(
A_ , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
        # Upgraded those two to real pipeline arguments (they just get sent to the model as they're unlikely to mean anything else.)
        lowerCamelCase_ = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' , torch_dtype=torch.bfloat16 )
self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloat16 )
lowerCamelCase_ = pipe('This is a test' )
self.assertEqual(
A_ , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
lowerCamelCase_ = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' )
self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.float32 )
lowerCamelCase_ = pipe('This is a test' )
self.assertEqual(
A_ , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
@require_torch
@require_torch_gpu
def a__ ( self : int ) -> str:
"""simple docstring"""
import torch
        lowerCamelCase_ = pipeline(model='hf-internal-testing/tiny-random-bloom' , device=0 , torch_dtype=torch.float16 )
pipe('This is a test' )
@require_torch
@require_accelerate
@require_torch_gpu
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
import torch
        lowerCamelCase_ = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' , torch_dtype=torch.float16 )
pipe('This is a test' , do_sample=A_ , top_p=0.5 )
def a__ ( self : Tuple ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = 'Hello world'
lowerCamelCase_ = pipeline('text-generation' , model='hf-internal-testing/tiny-random-gpt2' )
if text_generator.model.framework == "tf":
lowerCamelCase_ = logging.get_logger('transformers.generation.tf_utils' )
else:
lowerCamelCase_ = logging.get_logger('transformers.generation.utils' )
        lowerCamelCase_ = 'Both `max_new_tokens`' # The beginning of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(A_ ) as cl:
lowerCamelCase_ = text_generator(A_ , max_length=10 , max_new_tokens=1 )
self.assertIn(A_ , cl.out )
# The user only sets one -> no warning
with CaptureLogger(A_ ) as cl:
lowerCamelCase_ = text_generator(A_ , max_new_tokens=1 )
self.assertNotIn(A_ , cl.out )
with CaptureLogger(A_ ) as cl:
lowerCamelCase_ = text_generator(A_ , max_length=10 )
self.assertNotIn(A_ , cl.out )
| 651
| 1
|
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def _SCREAMING_SNAKE_CASE ( lowercase : Union[str, Any] , lowercase : Tuple ):
'''simple docstring'''
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
lowerCamelCase_ = flax_key_tuple[:-1] + ('weight',)
lowerCamelCase_ = torch.permute(lowercase , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(lowercase ):
# linear layer
lowerCamelCase_ = flax_key_tuple[:-1] + ('weight',)
lowerCamelCase_ = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
lowerCamelCase_ = flax_key_tuple[:-1] + ('weight',)
return flax_key_tuple, flax_tensor
def _SCREAMING_SNAKE_CASE ( lowercase : int , lowercase : Any , lowercase : Tuple ):
'''simple docstring'''
if "metadata" in layer:
lowerCamelCase_ = layer.split('metadata' )
lowerCamelCase_ = ''.join(split_layer[0] )[:-1]
lowerCamelCase_ = [tuple(('metadata' + split_layer[1]).split('/' ) )]
elif "kvstore" in layer:
lowerCamelCase_ = layer.split('kvstore' )
lowerCamelCase_ = ''.join(split_layer[0] )[:-1]
lowerCamelCase_ = [tuple(('kvstore' + split_layer[1]).split('/' ) )]
else:
lowerCamelCase_ = layer.split('/' )
lowerCamelCase_ = '/'.join(split_layer[:-1] )
lowerCamelCase_ = (split_layer[-1],)
if "kvstore/path" in layer:
lowerCamelCase_ = f"""{switch_checkpoint_path}/{checkpoint_info[layer]}"""
elif "kvstore/driver" in layer:
lowerCamelCase_ = 'file'
else:
lowerCamelCase_ = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def _SCREAMING_SNAKE_CASE ( lowercase : int , lowercase : Optional[int] ):
'''simple docstring'''
lowerCamelCase_ = rename_keys(lowercase )
lowerCamelCase_ = {}
for k, v in current_block.items():
lowerCamelCase_ = v
lowerCamelCase_ = new_current_block
torch.save(lowercase , lowercase )
def _SCREAMING_SNAKE_CASE ( lowercase : Tuple , lowercase : List[str] , lowercase : Optional[Any] , lowercase : int , lowercase : str = WEIGHTS_NAME ):
'''simple docstring'''
lowerCamelCase_ = convert_file_size_to_int(lowercase )
lowerCamelCase_ = []
lowerCamelCase_ = {}
lowerCamelCase_ = 0
lowerCamelCase_ = 0
os.makedirs(lowercase , exist_ok=lowercase )
with gfile.GFile(switch_checkpoint_path + '/checkpoint' , 'rb' ) as fp:
lowerCamelCase_ = serialization.msgpack_restore(fp.read() )['optimizer']['target']
lowerCamelCase_ = flatten_dict(lowercase , sep='/' )
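        # Flatten the nested parameter tree into '/'-separated keys so each entry can be matched to its tensorstore metadata below.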
lowerCamelCase_ = {}
for layer in checkpoint_info.keys():
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = get_key_and_tensorstore_dict(
lowercase , lowercase , lowercase )
if curr_real_layer_name in all_layers:
lowerCamelCase_ = content
else:
lowerCamelCase_ = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
lowerCamelCase_ = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
lowerCamelCase_ = torch.tensor(lowercase )
lowerCamelCase_ = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
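        # numel() times the per-element byte size gives this tensor's footprint, used to decide when the current shard is full.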
# use the renaming pattern from the small conversion scripts
lowerCamelCase_ , lowerCamelCase_ = rename_base_flax_keys(tuple(key.split('/' ) ) , lowercase )
lowerCamelCase_ = '/'.join(lowercase )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
lowerCamelCase_ = os.path.join(
lowercase , weights_name.replace('.bin' , f"""-{len(lowercase )+1:05d}-of-???.bin""" ) )
rename_and_save_block(lowercase , lowercase )
sharded_state_dicts.append(current_block.keys() )
del current_block
lowerCamelCase_ = {}
lowerCamelCase_ = 0
lowerCamelCase_ = raw_weights.to(getattr(lowercase , lowercase ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
lowerCamelCase_ = os.path.join(lowercase , weights_name.replace('.bin' , f"""-{len(lowercase )+1:05d}-of-???.bin""" ) )
rename_and_save_block(lowercase , lowercase )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(lowercase ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
lowerCamelCase_ = {}
lowerCamelCase_ = {}
for idx, shard in enumerate(lowercase ):
lowerCamelCase_ = weights_name.replace(
'.bin' , f"""-{idx+1:05d}-of-{len(lowercase ):05d}.bin""" ) # len(sharded_state_dicts):05d}
lowerCamelCase_ = os.path.join(lowercase , weights_name.replace('.bin' , f"""-{idx+1:05d}-of-???.bin""" ) )
os.rename(lowercase , os.path.join(lowercase , lowercase ) )
lowerCamelCase_ = shard
for key in shard:
lowerCamelCase_ = shard_file
# Add the metadata
lowerCamelCase_ = {'total_size': total_size}
lowerCamelCase_ = {'metadata': metadata, 'weight_map': weight_map}
with open(os.path.join(lowercase , lowercase ) , 'w' , encoding='utf-8' ) as f:
lowerCamelCase_ = json.dumps(lowercase , indent=2 , sort_keys=lowercase ) + '\n'
f.write(lowercase )
return metadata, index
if __name__ == "__main__":
lowerCamelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
type=str,
required=False,
help="Path to the output pytorch model.",
)
lowerCamelCase : Optional[int] = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer
lowerCamelCase_ = SwitchTransformersConfig.from_pretrained('google/switch-base-8' )
config.save_pretrained('/home/arthur_huggingface_co/transformers/switch_converted' )
lowerCamelCase_ = SwitchTransformersForConditionalGeneration.from_pretrained(
'/home/arthur_huggingface_co/transformers/switch_converted' , device_map='auto' )
    lowerCamelCase_ = T5Tokenizer.from_pretrained('t5-small' )
lowerCamelCase_ = 'A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.'
lowerCamelCase_ = tokenizer(lowercase , return_tensors='pt' ).input_ids
lowerCamelCase_ = model.generate(lowercase , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) )
| 651
|
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
lowerCamelCase : List[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
lowerCamelCase : Tuple = " \"\"\"\n Output class for the scheduler's step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n"
class A( unittest.TestCase ):
'''simple docstring'''
def a__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , 'schedulers/' ) )
lowerCamelCase_ = self.diffusers_dir
shutil.copy(
os.path.join(A_ , 'src/diffusers/schedulers/scheduling_ddpm.py' ) , os.path.join(self.diffusers_dir , 'schedulers/scheduling_ddpm.py' ) , )
def a__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = 'src/diffusers'
shutil.rmtree(self.diffusers_dir )
def a__ ( self : str , A_ : Optional[Any] , A_ : Optional[int] , A_ : str , A_ : Optional[Any]=None ) -> int:
"""simple docstring"""
lowerCamelCase_ = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
lowerCamelCase_ = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
        lowerCamelCase_ = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=119 )
lowerCamelCase_ = black.format_str(A_ , mode=A_ )
lowerCamelCase_ = os.path.join(self.diffusers_dir , 'new_code.py' )
with open(A_ , 'w' , newline='\n' ) as f:
f.write(A_ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(A_ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=A_ )
with open(A_ , 'r' ) as f:
                self.assertEqual(f.read() , A_ )
def a__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = check_copies.find_code_in_diffusers('schedulers.scheduling_ddpm.DDPMSchedulerOutput' )
self.assertEqual(A_ , A_ )
def a__ ( self : Any ) -> Dict:
"""simple docstring"""
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , REFERENCE_CODE + '\n' , )
# With no empty line at the end
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , A_ , )
# Copy consistency with rename
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , re.sub('DDPM' , 'Test' , A_ ) , )
# Copy consistency with a really long name
lowerCamelCase_ = 'TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'
self.check_copy_consistency(
f"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , f"""{long_class_name}SchedulerOutput""" , re.sub('Bert' , A_ , A_ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , A_ , overwrite_result=re.sub('DDPM' , 'Test' , A_ ) , )
| 651
| 1
|
import heapq as hq
import math
from collections.abc import Iterator
class A:
'''simple docstring'''
def __init__( self : List[str] , A_ : Optional[Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = str(id_ )
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = []
lowerCamelCase_ = {} # {vertex:distance}
def __lt__( self : Dict , A_ : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
return self.key < other.key
def __repr__( self : Optional[Any] ) -> Dict:
"""simple docstring"""
return self.id
def a__ ( self : int , A_ : Optional[Any] ) -> List[str]:
"""simple docstring"""
self.neighbors.append(A_ )
def a__ ( self : Optional[Any] , A_ : Any , A_ : Any ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = weight
def _SCREAMING_SNAKE_CASE ( lowercase : Dict , lowercase : Tuple , lowercase : Optional[int] , lowercase : Optional[int] ):
'''simple docstring'''
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , lowercase )
graph[b - 1].add_edge(graph[a - 1] , lowercase )
def _SCREAMING_SNAKE_CASE ( lowercase : list , lowercase : Vertex ):
'''simple docstring'''
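    # List-based Prim: O(V^2) overall; each iteration scans the remaining vertices for the minimum key.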
lowerCamelCase_ = []
for u in graph:
lowerCamelCase_ = math.inf
lowerCamelCase_ = None
lowerCamelCase_ = 0
lowerCamelCase_ = graph[:]
while q:
lowerCamelCase_ = min(lowercase )
q.remove(lowercase )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
lowerCamelCase_ = u
lowerCamelCase_ = u.edges[v.id]
for i in range(1 , len(lowercase ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def _SCREAMING_SNAKE_CASE ( lowercase : list , lowercase : Vertex ):
'''simple docstring'''
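    # Heap-based Prim: pop the minimum-key vertex; since heapq has no decrease-key, the heap is rebuilt after each round of key updates.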
for u in graph:
lowerCamelCase_ = math.inf
lowerCamelCase_ = None
lowerCamelCase_ = 0
lowerCamelCase_ = list(lowercase )
hq.heapify(lowercase )
while h:
lowerCamelCase_ = hq.heappop(lowercase )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
lowerCamelCase_ = u
lowerCamelCase_ = u.edges[v.id]
hq.heapify(lowercase )
for i in range(1 , len(lowercase ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
| 651
|
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class A( unittest.TestCase ):
'''simple docstring'''
def a__ ( self : Optional[int] , A_ : Tuple , A_ : str , A_ : int ) -> Any:
"""simple docstring"""
self.assertEqual(len(A_ ) , len(A_ ) )
for a, b in zip(A_ , A_ ):
self.assertAlmostEqual(A_ , A_ , delta=A_ )
def a__ ( self : int ) -> str:
"""simple docstring"""
lowerCamelCase_ = GradientAccumulator()
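        # Accumulate three single-tensor gradients; the accumulator should hold their element-wise sum, [-2.0, 5.0], after three steps.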
accumulator([tf.constant([1.0, 2.0] )] )
accumulator([tf.constant([-2.0, 1.0] )] )
accumulator([tf.constant([-1.0, 2.0] )] )
with self.assertRaises(A_ ):
accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
self.assertEqual(accumulator.step , 3 )
self.assertEqual(len(accumulator.gradients ) , 1 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1E-2 )
def a__ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = None
ops.enable_eager_execution_internal()
lowerCamelCase_ = tf.config.list_physical_devices('CPU' )
if len(A_ ) == 1:
tf.config.set_logical_device_configuration(
physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
lowerCamelCase_ = tf.config.list_logical_devices(device_type='CPU' )
lowerCamelCase_ = tf.distribute.MirroredStrategy(devices=devices[:2] )
with strategy.scope():
lowerCamelCase_ = GradientAccumulator()
lowerCamelCase_ = tf.Variable([4.0, 3.0] )
lowerCamelCase_ , lowerCamelCase_ = create_optimizer(5E-5 , 10 , 5 )
lowerCamelCase_ = tf.Variable([0.0, 0.0] , trainable=A_ )
def accumulate_on_replica(A_ : Any ):
accumulator([gradient] )
def apply_on_replica():
optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )
@tf.function
def accumulate(A_ : List[Any] , A_ : Tuple ):
with strategy.scope():
lowerCamelCase_ = strategy.experimental_local_results(A_ )
local_variables[0].assign(A_ )
local_variables[1].assign(A_ )
strategy.run(A_ , args=(gradient_placeholder,) )
@tf.function
def apply_grad():
with strategy.scope():
strategy.run(A_ )
def _check_local_values(A_ : List[Any] , A_ : str ):
lowerCamelCase_ = strategy.experimental_local_results(accumulator._gradients[0] )
self.assertListAlmostEqual(values[0].value() , A_ , tol=1E-2 )
self.assertListAlmostEqual(values[1].value() , A_ , tol=1E-2 )
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] )
| 651
| 1
|
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
lowerCamelCase : List[str] = logging.getLogger()
def _SCREAMING_SNAKE_CASE ( lowercase : Path , lowercase : list ):
'''simple docstring'''
lowerCamelCase_ = '\n'.join(lowercase )
Path(lowercase ).open('w' ).writelines(lowercase )
lowerCamelCase : Optional[int] = "patrickvonplaten/t5-tiny-random"
lowerCamelCase : Union[str, Any] = "sshleifer/bart-tiny-random"
lowerCamelCase : Tuple = "sshleifer/tiny-mbart"
lowerCamelCase : str = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class A( UpperCamelCase ):
'''simple docstring'''
def a__ ( self : List[str] , A_ : Optional[Any] ) -> str:
"""simple docstring"""
lowerCamelCase_ = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source'
lowerCamelCase_ = input_file_name.parent / 'utest_output.txt'
assert not output_file_name.exists()
lowerCamelCase_ = [' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.']
_dump_articles(A_ , A_ )
lowerCamelCase_ = str(Path(self.get_auto_remove_tmp_dir() ) / 'scores.json' )
lowerCamelCase_ = 'translation_en_to_de' if model == T5_TINY else 'summarization'
lowerCamelCase_ = f"""
            run_eval.py
{model}
{input_file_name}
{output_file_name}
--score_path {score_path}
--task {task}
--num_beams 2
--length_penalty 2.0
""".split()
with patch.object(A_ , 'argv' , A_ ):
run_generate()
assert Path(A_ ).exists()
# os.remove(Path(output_file_name))
def a__ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
self.run_eval_tester(A_ )
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
def a__ ( self : Optional[int] , A_ : str ) -> int:
"""simple docstring"""
self.run_eval_tester(A_ )
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
def a__ ( self : Union[str, Any] , A_ : Optional[Any] ) -> Any:
"""simple docstring"""
lowerCamelCase_ = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source'
lowerCamelCase_ = input_file_name.parent / 'utest_output.txt'
assert not output_file_name.exists()
lowerCamelCase_ = {
'en': ['Machine learning is great, isn\'t it?', 'I like to eat bananas', 'Tomorrow is another great day!'],
'de': [
'Maschinelles Lernen ist großartig, oder?',
'Ich esse gerne Bananen',
'Morgen ist wieder ein toller Tag!',
],
}
lowerCamelCase_ = Path(self.get_auto_remove_tmp_dir() )
lowerCamelCase_ = str(tmp_dir / 'scores.json' )
lowerCamelCase_ = str(tmp_dir / 'val.target' )
_dump_articles(A_ , text['en'] )
_dump_articles(A_ , text['de'] )
lowerCamelCase_ = 'translation_en_to_de' if model == T5_TINY else 'summarization'
lowerCamelCase_ = f"""
run_eval_search.py
{model}
{str(A_ )}
{str(A_ )}
--score_path {score_path}
--reference_path {reference_path}
--task {task}
""".split()
testargs.extend(['--search', 'num_beams=1:2 length_penalty=0.9:1.0'] )
with patch.object(A_ , 'argv' , A_ ):
with CaptureStdout() as cs:
run_search()
lowerCamelCase_ = [' num_beams | length_penalty', model, 'Best score args']
lowerCamelCase_ = ['Info']
if "translation" in task:
expected_strings.append('bleu' )
else:
expected_strings.extend(A_ )
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
assert Path(A_ ).exists()
os.remove(Path(A_ ) )
| 651
|
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
lowerCamelCase : str = imread(r"digital_image_processing/image_data/lena_small.jpg")
lowerCamelCase : Optional[Any] = cvtColor(img, COLOR_BGR2GRAY)
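# Load the sample image once at import time; the grayscale copy is reused by the filter tests below.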
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = cn.convert_to_negative(lowercase )
# assert negative_img array for at least one True
assert negative_img.any()
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
with Image.open('digital_image_processing/image_data/lena_small.jpg' ) as img:
# Work around assertion for response
assert str(cc.change_contrast(lowercase , 1_10 ) ).startswith(
'<PIL.Image.Image image mode=RGB size=100x100 at' )
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = imread('digital_image_processing/image_data/lena_small.jpg' , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
lowerCamelCase_ = canny.canny(lowercase )
# assert canny array for at least one True
assert canny_array.any()
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
assert gg.gaussian_filter(lowercase , 5 , sigma=0.9 ).all()
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
lowerCamelCase_ = conv.img_convolve(lowercase , lowercase ).astype(lowercase )
assert res.any()
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
assert med.median_filter(lowercase , 3 ).any()
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ , lowerCamelCase_ = sob.sobel_filter(lowercase )
assert grad.any() and theta.any()
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = sp.make_sepia(lowercase , 20 )
assert sepia.all()
def _SCREAMING_SNAKE_CASE ( lowercase : str = "digital_image_processing/image_data/lena_small.jpg" ):
'''simple docstring'''
lowerCamelCase_ = bs.Burkes(imread(lowercase , 1 ) , 1_20 )
burkes.process()
assert burkes.output_img.any()
def _SCREAMING_SNAKE_CASE ( lowercase : str = "digital_image_processing/image_data/lena_small.jpg" , ):
'''simple docstring'''
lowerCamelCase_ = rs.NearestNeighbour(imread(lowercase , 1 ) , 4_00 , 2_00 )
nn.process()
assert nn.output.any()
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = 'digital_image_processing/image_data/lena.jpg'
# Reading the image and converting it to grayscale.
lowerCamelCase_ = imread(lowercase , 0 )
    # Test that get_neighbors_pixel() returns a value (not None)
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = image[x_coordinate][y_coordinate]
lowerCamelCase_ = lbp.get_neighbors_pixel(
lowercase , lowercase , lowercase , lowercase )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
lowerCamelCase_ = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
lowerCamelCase_ = lbp.local_binary_value(lowercase , lowercase , lowercase )
assert lbp_image.any()
| 651
| 1
|
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A:
'''simple docstring'''
def __init__( self : Optional[Any] , A_ : Union[str, Any] , A_ : str=13 , A_ : List[Any]=32 , A_ : Tuple=2 , A_ : Dict=3 , A_ : Union[str, Any]=16 , A_ : List[str]=[32, 64, 128] , A_ : Optional[Any]=[1, 2, 1] , A_ : Tuple=[2, 2, 4] , A_ : Dict=2 , A_ : Optional[Any]=2.0 , A_ : List[str]=True , A_ : Dict=0.0 , A_ : List[str]=0.0 , A_ : Optional[int]=0.1 , A_ : str="gelu" , A_ : Optional[Any]=False , A_ : Any=True , A_ : Optional[Any]=0.02 , A_ : Dict=1E-5 , A_ : int=True , A_ : Optional[int]=None , A_ : List[str]=True , A_ : Tuple=10 , A_ : Any=8 , A_ : Dict=["stage1", "stage2"] , A_ : Optional[Any]=[1, 2] , ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = image_size
lowerCamelCase_ = patch_size
lowerCamelCase_ = num_channels
lowerCamelCase_ = embed_dim
lowerCamelCase_ = hidden_sizes
lowerCamelCase_ = depths
lowerCamelCase_ = num_heads
lowerCamelCase_ = window_size
lowerCamelCase_ = mlp_ratio
lowerCamelCase_ = qkv_bias
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = drop_path_rate
lowerCamelCase_ = hidden_act
lowerCamelCase_ = use_absolute_embeddings
lowerCamelCase_ = patch_norm
lowerCamelCase_ = layer_norm_eps
lowerCamelCase_ = initializer_range
lowerCamelCase_ = is_training
lowerCamelCase_ = scope
lowerCamelCase_ = use_labels
lowerCamelCase_ = type_sequence_label_size
lowerCamelCase_ = encoder_stride
lowerCamelCase_ = out_features
lowerCamelCase_ = out_indices
def a__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = self.get_config()
return config, pixel_values, labels
def a__ ( self : List[Any] ) -> Any:
"""simple docstring"""
return FocalNetConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def a__ ( self : Union[str, Any] , A_ : Dict , A_ : int , A_ : Optional[int] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = FocalNetModel(config=A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = model(A_ )
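        # each stage halves the spatial resolution, so the token count shrinks by a factor of 4 per stage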
lowerCamelCase_ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowerCamelCase_ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def a__ ( self : Tuple , A_ : List[str] , A_ : Optional[int] , A_ : Optional[Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = FocalNetBackbone(config=A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = model(A_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
lowerCamelCase_ = None
lowerCamelCase_ = FocalNetBackbone(config=A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = model(A_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def a__ ( self : int , A_ : Optional[Any] , A_ : Optional[int] , A_ : Any ) -> Any:
"""simple docstring"""
lowerCamelCase_ = FocalNetForMaskedImageModeling(config=A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = model(A_ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCamelCase_ = 1
lowerCamelCase_ = FocalNetForMaskedImageModeling(A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase_ = model(A_ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def a__ ( self : Tuple , A_ : List[Any] , A_ : int , A_ : Dict ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = self.type_sequence_label_size
lowerCamelCase_ = FocalNetForImageClassification(A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = model(A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCamelCase_ = 1
lowerCamelCase_ = FocalNetForImageClassification(A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase_ = model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a__ ( self : int ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ = self.prepare_config_and_inputs()
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = config_and_inputs
lowerCamelCase_ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class A( UpperCamelCase , UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
UpperCamelCase = (
{'''feature-extraction''': FocalNetModel, '''image-classification''': FocalNetForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = FocalNetModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=A_ , embed_dim=37 , has_text_modality=A_ )
def a__ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__ ( self : Any ) -> Optional[int]:
"""simple docstring"""
return
def a__ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def a__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*A_ )
def a__ ( self : Dict ) -> int:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*A_ )
def a__ ( self : List[str] ) -> Any:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_ )
@unittest.skip(reason='FocalNet does not use inputs_embeds' )
def a__ ( self : int ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason='FocalNet does not use feedforward chunking' )
def a__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
pass
def a__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowerCamelCase_ = model_class(A_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A_ , nn.Linear ) )
def a__ ( self : Any ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowerCamelCase_ = model_class(A_ )
lowerCamelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase_ = [*signature.parameters.keys()]
lowerCamelCase_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , A_ )
def a__ ( self : int , A_ : List[Any] , A_ : int , A_ : Dict , A_ : Dict ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = model_class(A_ )
model.to(A_ )
model.eval()
with torch.no_grad():
lowerCamelCase_ = model(**self._prepare_for_class(A_ , A_ ) )
lowerCamelCase_ = outputs.hidden_states
lowerCamelCase_ = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(A_ ) , A_ )
# FocalNet has a different seq_length
lowerCamelCase_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCamelCase_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
lowerCamelCase_ = outputs.reshaped_hidden_states
self.assertEqual(len(A_ ) , A_ )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = reshaped_hidden_states[0].shape
lowerCamelCase_ = (
reshaped_hidden_states[0].view(A_ , A_ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def a__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
lowerCamelCase_ = True
self.check_hidden_states_output(A_ , A_ , A_ , A_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase_ = True
self.check_hidden_states_output(A_ , A_ , A_ , A_ )
def a__ ( self : List[str] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = 3
lowerCamelCase_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowerCamelCase_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCamelCase_ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowerCamelCase_ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
lowerCamelCase_ = True
self.check_hidden_states_output(A_ , A_ , A_ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase_ = True
self.check_hidden_states_output(A_ , A_ , A_ , (padded_height, padded_width) )
@slow
def a__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = FocalNetModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def a__ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = _config_zero_init(A_ )
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(config=A_ )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class A( unittest.TestCase ):
'''simple docstring'''
@cached_property
def a__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
return AutoImageProcessor.from_pretrained('microsoft/focalnet-tiny' ) if is_vision_available() else None
@slow
def a__ ( self : Tuple ) -> Any:
"""simple docstring"""
lowerCamelCase_ = FocalNetForImageClassification.from_pretrained('microsoft/focalnet-tiny' ).to(A_ )
lowerCamelCase_ = self.default_image_processor
lowerCamelCase_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
lowerCamelCase_ = image_processor(images=A_ , return_tensors='pt' ).to(A_ )
# forward pass
with torch.no_grad():
lowerCamelCase_ = model(**A_ )
# verify the logits
lowerCamelCase_ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , A_ )
lowerCamelCase_ = torch.tensor([0.2166, -0.4368, 0.2191] ).to(A_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A_ , atol=1E-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class A( UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (FocalNetBackbone,) if is_torch_available() else ()
UpperCamelCase = FocalNetConfig
UpperCamelCase = False
def a__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = FocalNetModelTester(self )
| 651
|
class A:
'''simple docstring'''
def __init__( self : Dict ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = {}
def a__ ( self : Union[str, Any] , A_ : List[Any] ) -> int:
"""simple docstring"""
if vertex not in self.adjacency:
lowerCamelCase_ = {}
self.num_vertices += 1
def a__ ( self : int , A_ : int , A_ : Optional[Any] , A_ : List[str] ) -> Tuple:
"""simple docstring"""
self.add_vertex(A_ )
self.add_vertex(A_ )
if head == tail:
return
lowerCamelCase_ = weight
lowerCamelCase_ = weight
def a__ ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = self.get_edges()
for edge in edges:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = edge
edges.remove((tail, head, weight) )
for i in range(len(A_ ) ):
lowerCamelCase_ = list(edges[i] )
        edges.sort(key=lambda e : e[2] )
for i in range(len(A_ ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
lowerCamelCase_ = edges[i][2] + 1
for edge in edges:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = edge
lowerCamelCase_ = weight
lowerCamelCase_ = weight
def __str__( self : str ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = ''
for tail in self.adjacency:
for head in self.adjacency[tail]:
lowerCamelCase_ = self.adjacency[head][tail]
string += f"""{head} -> {tail} == {weight}\n"""
return string.rstrip('\n' )
def a__ ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def a__ ( self : List[str] ) -> int:
"""simple docstring"""
return self.adjacency.keys()
@staticmethod
def a__ ( A_ : Optional[Any]=None , A_ : List[str]=None ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = Graph()
if vertices is None:
lowerCamelCase_ = []
if edges is None:
lowerCamelCase_ = []
for vertex in vertices:
g.add_vertex(A_ )
for edge in edges:
g.add_edge(*A_ )
return g
class A:
'''simple docstring'''
def __init__( self : Optional[int] ) -> int:
"""simple docstring"""
lowerCamelCase_ = {}
lowerCamelCase_ = {}
def __len__( self : Any ) -> List[str]:
"""simple docstring"""
return len(self.parent )
def a__ ( self : List[str] , A_ : Any ) -> Dict:
"""simple docstring"""
if item in self.parent:
return self.find(A_ )
lowerCamelCase_ = item
lowerCamelCase_ = 0
return item
def a__ ( self : List[str] , A_ : Tuple ) -> Optional[int]:
"""simple docstring"""
if item not in self.parent:
return self.make_set(A_ )
if item != self.parent[item]:
lowerCamelCase_ = self.find(self.parent[item] )
return self.parent[item]
def a__ ( self : Any , A_ : int , A_ : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = self.find(A_ )
lowerCamelCase_ = self.find(A_ )
        if roota == rootb:
            return roota
        if self.rank[roota] > self.rank[rootb]:
            lowerCamelCase_ = roota
            return roota
        if self.rank[roota] < self.rank[rootb]:
            lowerCamelCase_ = rootb
            return rootb
        if self.rank[roota] == self.rank[rootb]:
            self.rank[roota] += 1
            lowerCamelCase_ = roota
            return roota
        return None
@staticmethod
def a__ ( A_ : int ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = graph.num_vertices
lowerCamelCase_ = Graph.UnionFind()
lowerCamelCase_ = []
while num_components > 1:
lowerCamelCase_ = {}
for vertex in graph.get_vertices():
lowerCamelCase_ = -1
lowerCamelCase_ = graph.get_edges()
for edge in edges:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = edge
edges.remove((tail, head, weight) )
for edge in edges:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = edge
lowerCamelCase_ = union_find.find(A_ )
lowerCamelCase_ = union_find.find(A_ )
                if seta != setb:
                    if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
                        lowerCamelCase_ = [head, tail, weight]
                    if cheap_edge[setb] == -1 or cheap_edge[setb][2] > weight:
                        lowerCamelCase_ = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = cheap_edge[vertex]
if union_find.find(A_ ) != union_find.find(A_ ):
union_find.union(A_ , A_ )
mst_edges.append(cheap_edge[vertex] )
lowerCamelCase_ = num_components - 1
lowerCamelCase_ = Graph.build(edges=A_ )
return mst
| 651
| 1
|
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
lowerCamelCase : Optional[Any] = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
from nltk import word_tokenize
lowerCamelCase : Optional[int] = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"
lowerCamelCase : str = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
lowerCamelCase : Tuple = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A( datasets.Metric ):
'''simple docstring'''
def a__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'] , reference_urls=[
'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score',
'https://en.wikipedia.org/wiki/METEOR',
] , )
def a__ ( self : int , A_ : Union[str, Any] ) -> Tuple:
"""simple docstring"""
import nltk
nltk.download('wordnet' )
if NLTK_VERSION >= version.Version('3.6.5' ):
nltk.download('punkt' )
if NLTK_VERSION >= version.Version('3.6.6' ):
nltk.download('omw-1.4' )
def a__ ( self : Dict , A_ : Optional[Any] , A_ : Any , A_ : Tuple=0.9 , A_ : List[Any]=3 , A_ : Optional[Any]=0.5 ) -> Optional[int]:
"""simple docstring"""
if NLTK_VERSION >= version.Version('3.6.5' ):
lowerCamelCase_ = [
meteor_score.single_meteor_score(
word_tokenize(A_ ) , word_tokenize(A_ ) , alpha=A_ , beta=A_ , gamma=A_ )
for ref, pred in zip(A_ , A_ )
]
else:
lowerCamelCase_ = [
meteor_score.single_meteor_score(A_ , A_ , alpha=A_ , beta=A_ , gamma=A_ )
for ref, pred in zip(A_ , A_ )
]
return {"meteor": np.mean(A_ )}
| 651
|
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
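    # compute the last ten digits of the series 1**1 + 2**2 + ... + 1000**1000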
lowerCamelCase_ = 0
for i in range(1 , 10_01 ):
total += i**i
    return str(total )[-10:]
if __name__ == "__main__":
print(solution())
| 651
| 1
|
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCamelCase : List[str] = logging.get_logger(__name__)
lowerCamelCase : List[Any] = {
"nielsr/canine-s": 2_048,
}
# Unicode defines 1,114,112 total “codepoints”
lowerCamelCase : int = 1_114_112
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
lowerCamelCase : Optional[int] = 0
lowerCamelCase : List[Any] = 0xe0_00
lowerCamelCase : List[Any] = 0xe0_01
lowerCamelCase : Any = 0xe0_02
lowerCamelCase : Dict = 0xe0_03
lowerCamelCase : Union[str, Any] = 0xe0_04
# Maps special codepoints to human-readable names.
lowerCamelCase : Dict[int, str] = {
    # Special symbols are represented using codepoint values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
lowerCamelCase : Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self : List[Any] , A_ : int=chr(CLS ) , A_ : Tuple=chr(SEP ) , A_ : Dict=chr(SEP ) , A_ : Optional[int]=chr(CLS ) , A_ : Any=chr(PAD ) , A_ : List[str]=chr(MASK ) , A_ : str=False , A_ : Dict=2048 , **A_ : Dict , ) -> Any:
"""simple docstring"""
lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else bos_token
lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else eos_token
lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else sep_token
lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else cls_token
lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token
super().__init__(
bos_token=A_ , eos_token=A_ , sep_token=A_ , cls_token=A_ , pad_token=A_ , mask_token=A_ , add_prefix_space=A_ , model_max_length=A_ , **A_ , )
# Creates a mapping for looking up the IDs of special symbols.
lowerCamelCase_ = {}
for codepoint, name in SPECIAL_CODEPOINTS.items():
lowerCamelCase_ = codepoint
# Creates a mapping for looking up the string forms of special symbol IDs.
lowerCamelCase_ = {
codepoint: name for name, codepoint in self._special_codepoints.items()
}
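        # CANINE tokenizes text into raw Unicode codepoints, so the vocabulary spans the full codepoint space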
lowerCamelCase_ = UNICODE_VOCAB_SIZE
lowerCamelCase_ = len(self._special_codepoints )
@property
def a__ ( self : int ) -> int:
"""simple docstring"""
return self._unicode_vocab_size
def a__ ( self : List[str] , A_ : str ) -> List[str]:
"""simple docstring"""
return list(A_ )
def a__ ( self : int , A_ : str ) -> int:
"""simple docstring"""
try:
return ord(A_ )
except TypeError:
raise ValueError(f"""invalid token: '{token}'""" )
def a__ ( self : List[str] , A_ : int ) -> str:
"""simple docstring"""
try:
if index in SPECIAL_CODEPOINTS:
return SPECIAL_CODEPOINTS[index]
return chr(A_ )
except TypeError:
raise ValueError(f"""invalid id: {index}""" )
def a__ ( self : Tuple , A_ : List[Any] ) -> Any:
"""simple docstring"""
return "".join(A_ )
def a__ ( self : List[str] , A_ : List[int] , A_ : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
lowerCamelCase_ = cls + token_ids_a + sep
if token_ids_a is not None:
result += token_ids_a + sep
return result
def a__ ( self : Dict , A_ : List[int] , A_ : Optional[List[int]] = None , A_ : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ )
lowerCamelCase_ = [1] + ([0] * len(A_ )) + [1]
if token_ids_a is not None:
result += ([0] * len(A_ )) + [1]
return result
def a__ ( self : Dict , A_ : List[int] , A_ : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
lowerCamelCase_ = len(cls + token_ids_a + sep ) * [0]
if token_ids_a is not None:
result += len(token_ids_a + sep ) * [1]
return result
def a__ ( self : List[Any] , A_ : str , A_ : Optional[str] = None ) -> Any:
"""simple docstring"""
return ()
| 651
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
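# backend-specific modules are registered lazily so the package can be imported without torch/tf/flax/vision installed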
lowerCamelCase : Dict = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Optional[int] = ["ViTFeatureExtractor"]
lowerCamelCase : Dict = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : int = [
"VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTForImageClassification",
"ViTForMaskedImageModeling",
"ViTModel",
"ViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Tuple = [
"TFViTForImageClassification",
"TFViTModel",
"TFViTPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Dict = [
"FlaxViTForImageClassification",
"FlaxViTModel",
"FlaxViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
lowerCamelCase : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 651
| 1
|
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowerCamelCase : List[Any] = logging.get_logger(__name__)
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = ['''pixel_values''']
def __init__( self : List[Any] , A_ : bool = True , A_ : Dict[str, int] = None , A_ : PILImageResampling = PILImageResampling.BICUBIC , A_ : bool = True , A_ : Dict[str, int] = None , A_ : bool = True , A_ : Union[int, float] = 1 / 255 , A_ : bool = True , A_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , A_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **A_ : Tuple , ) -> None:
"""simple docstring"""
super().__init__(**A_ )
lowerCamelCase_ = size if size is not None else {'shortest_edge': 224}
lowerCamelCase_ = get_size_dict(A_ , default_to_square=A_ )
lowerCamelCase_ = crop_size if crop_size is not None else {'height': 224, 'width': 224}
lowerCamelCase_ = get_size_dict(A_ , param_name='crop_size' )
lowerCamelCase_ = do_resize
lowerCamelCase_ = size
lowerCamelCase_ = resample
lowerCamelCase_ = do_center_crop
lowerCamelCase_ = crop_size
lowerCamelCase_ = do_rescale
lowerCamelCase_ = rescale_factor
lowerCamelCase_ = do_normalize
lowerCamelCase_ = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
lowerCamelCase_ = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def a__ ( self : Optional[Any] , A_ : np.ndarray , A_ : Dict[str, int] , A_ : PILImageResampling = PILImageResampling.BICUBIC , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : Tuple , ) -> np.ndarray:
"""simple docstring"""
lowerCamelCase_ = get_size_dict(A_ , default_to_square=A_ )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
lowerCamelCase_ = int((256 / 224) * size['shortest_edge'] )
lowerCamelCase_ = get_resize_output_image_size(A_ , size=A_ , default_to_square=A_ )
lowerCamelCase_ = {'height': output_size[0], 'width': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""" )
return resize(
A_ , size=(size_dict['height'], size_dict['width']) , resample=A_ , data_format=A_ , **A_ )
def a__ ( self : Any , A_ : np.ndarray , A_ : Dict[str, int] , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : Any , ) -> np.ndarray:
"""simple docstring"""
lowerCamelCase_ = get_size_dict(A_ )
if "height" not in size or "width" not in size:
raise ValueError(f"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(A_ , size=(size['height'], size['width']) , data_format=A_ , **A_ )
def a__ ( self : Optional[Any] , A_ : np.ndarray , A_ : Union[int, float] , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : Optional[int] , ) -> np.ndarray:
"""simple docstring"""
return rescale(A_ , scale=A_ , data_format=A_ , **A_ )
def a__ ( self : List[str] , A_ : np.ndarray , A_ : Union[float, List[float]] , A_ : Union[float, List[float]] , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : str , ) -> np.ndarray:
"""simple docstring"""
return normalize(A_ , mean=A_ , std=A_ , data_format=A_ , **A_ )
def a__ ( self : Optional[int] , A_ : ImageInput , A_ : Optional[bool] = None , A_ : Optional[Dict[str, int]] = None , A_ : PILImageResampling = None , A_ : Optional[bool] = None , A_ : Optional[Dict[str, int]] = None , A_ : Optional[bool] = None , A_ : Optional[float] = None , A_ : Optional[bool] = None , A_ : Optional[Union[float, Iterable[float]]] = None , A_ : Optional[Union[float, Iterable[float]]] = None , A_ : Optional[TensorType] = None , A_ : ChannelDimension = ChannelDimension.FIRST , **A_ : List[Any] , ) -> BatchFeature:
"""simple docstring"""
lowerCamelCase_ = do_resize if do_resize is not None else self.do_resize
lowerCamelCase_ = resample if resample is not None else self.resample
lowerCamelCase_ = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCamelCase_ = do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCamelCase_ = do_normalize if do_normalize is not None else self.do_normalize
lowerCamelCase_ = image_mean if image_mean is not None else self.image_mean
lowerCamelCase_ = image_std if image_std is not None else self.image_std
lowerCamelCase_ = size if size is not None else self.size
lowerCamelCase_ = get_size_dict(A_ , default_to_square=A_ )
lowerCamelCase_ = crop_size if crop_size is not None else self.crop_size
lowerCamelCase_ = get_size_dict(A_ , param_name='crop_size' )
lowerCamelCase_ = make_list_of_images(A_ )
if not valid_images(A_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
lowerCamelCase_ = [to_numpy_array(A_ ) for image in images]
if do_resize:
lowerCamelCase_ = [self.resize(A_ , A_ , A_ ) for image in images]
if do_center_crop:
lowerCamelCase_ = [self.center_crop(A_ , A_ ) for image in images]
if do_rescale:
lowerCamelCase_ = [self.rescale(A_ , A_ ) for image in images]
if do_normalize:
lowerCamelCase_ = [self.normalize(A_ , A_ , A_ ) for image in images]
lowerCamelCase_ = [to_channel_dimension_format(A_ , A_ ) for image in images]
lowerCamelCase_ = {'pixel_values': images}
return BatchFeature(data=A_ , tensor_type=A_ )
| 651
|
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
lowerCamelCase : int = datasets.logging.get_logger(__name__)
lowerCamelCase : Optional[Any] = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n"
lowerCamelCase : Tuple = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n"
lowerCamelCase : Optional[Any] = "\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting 'keep_singletons=False', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n 'mentions': mentions\n 'muc': MUC metric [Vilain et al, 1995]\n 'bcub': B-cubed [Bagga and Baldwin, 1998]\n 'ceafe': CEAFe [Luo et al., 2005]\n 'lea': LEA [Moosavi and Strube, 2016]\n 'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric('coval')\n >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',\n ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',\n ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',\n ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',\n ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',\n ... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n"
def _SCREAMING_SNAKE_CASE ( lowercase : str , lowercase : Optional[int] , lowercase : Any=False , lowercase : Any=False , lowercase : Dict=True , lowercase : List[str]=False , lowercase : int="dummy_doc" ):
'''simple docstring'''
lowerCamelCase_ = {doc: key_lines}
lowerCamelCase_ = {doc: sys_lines}
lowerCamelCase_ = {}
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ , lowerCamelCase_ = reader.get_doc_mentions(lowercase , key_doc_lines[doc] , lowercase )
key_singletons_num += singletons_num
if NP_only or min_span:
lowerCamelCase_ = reader.set_annotated_parse_trees(lowercase , key_doc_lines[doc] , lowercase , lowercase )
lowerCamelCase_ , lowerCamelCase_ = reader.get_doc_mentions(lowercase , sys_doc_lines[doc] , lowercase )
sys_singletons_num += singletons_num
if NP_only or min_span:
lowerCamelCase_ = reader.set_annotated_parse_trees(lowercase , key_doc_lines[doc] , lowercase , lowercase )
if remove_nested:
lowerCamelCase_ , lowerCamelCase_ = reader.remove_nested_coref_mentions(lowercase , lowercase )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
lowerCamelCase_ , lowerCamelCase_ = reader.remove_nested_coref_mentions(lowercase , lowercase )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
lowerCamelCase_ = reader.get_mention_assignments(lowercase , lowercase )
lowerCamelCase_ = reader.get_mention_assignments(lowercase , lowercase )
lowerCamelCase_ = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
'Number of removed nested coreferring mentions in the key '
f"""annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}""" )
logger.info(
'Number of resulting singleton clusters in the key '
f"""annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}""" )
if not keep_singletons:
logger.info(
f"""{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system """
'files, respectively' )
return doc_coref_infos
def _SCREAMING_SNAKE_CASE ( lowercase : Optional[Any] , lowercase : Tuple , lowercase : List[str] , lowercase : List[Any] , lowercase : List[Any] , lowercase : Tuple , lowercase : str ):
'''simple docstring'''
lowerCamelCase_ = get_coref_infos(lowercase , lowercase , lowercase , lowercase , lowercase , lowercase )
lowerCamelCase_ = {}
lowerCamelCase_ = 0
lowerCamelCase_ = 0
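    # the CoNLL score is the average of the MUC, B-cubed and CEAFe F1 values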
for name, metric in metrics:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = evaluator.evaluate_documents(lowercase , lowercase , beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({f"""{name}/recall""": recall, f"""{name}/precision""": precision, f"""{name}/f1""": fa} )
logger.info(
name.ljust(10 ) , f"""Recall: {recall * 1_00:.2f}""" , f""" Precision: {precision * 1_00:.2f}""" , f""" F1: {fa * 1_00:.2f}""" , )
if conll_subparts_num == 3:
lowerCamelCase_ = (conll / 3) * 1_00
logger.info(f"""CoNLL score: {conll:.2f}""" )
output_scores.update({'conll_score': conll} )
return output_scores
def _SCREAMING_SNAKE_CASE ( lowercase : Union[str, Any] ):
'''simple docstring'''
lowerCamelCase_ = False
for line in key_lines:
if not line.startswith('#' ):
if len(line.split() ) > 6:
lowerCamelCase_ = line.split()[5]
                if parse_col != "-":
lowerCamelCase_ = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A( datasets.Metric ):
'''simple docstring'''
def a__ ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' ) ),
'references': datasets.Sequence(datasets.Value('string' ) ),
} ) , codebase_urls=['https://github.com/ns-moosavi/coval'] , reference_urls=[
'https://github.com/ns-moosavi/coval',
'https://www.aclweb.org/anthology/P16-1060',
'http://www.conll.cemantix.org/2012/data.html',
] , )
def a__ ( self : List[str] , A_ : Optional[Any] , A_ : Optional[int] , A_ : int=True , A_ : str=False , A_ : int=False , A_ : Union[str, Any]=False ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = [
('mentions', evaluator.mentions),
('muc', evaluator.muc),
('bcub', evaluator.b_cubed),
('ceafe', evaluator.ceafe),
('lea', evaluator.lea),
]
if min_span:
lowerCamelCase_ = util.check_gold_parse_annotation(A_ )
if not has_gold_parse:
raise NotImplementedError('References should have gold parse annotation to use \'min_span\'.' )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
lowerCamelCase_ = evaluate(
key_lines=A_ , sys_lines=A_ , metrics=A_ , NP_only=A_ , remove_nested=A_ , keep_singletons=A_ , min_span=A_ , )
return score
| 651
| 1
|
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def _SCREAMING_SNAKE_CASE ( lowercase : List[Any] , lowercase : Optional[Any] , lowercase : Dict ):
'''simple docstring'''
lowerCamelCase_ = AutoConfig.from_pretrained(lowercase )
lowerCamelCase_ = FlaxAutoModelForSeqaSeqLM.from_config(config=lowercase )
lowerCamelCase_ = checkpoints.load_tax_checkpoint(lowercase )
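    # T5 v1.1-style checkpoints use a gated activation with two MLP input projections (wi_0 / wi_1)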
lowerCamelCase_ = 'wi_0' in tax_model['target']['encoder']['layers_0']['mlp']
if config.model_type == "t5":
lowerCamelCase_ = 'SelfAttention'
if config.model_type == "longt5" and config.encoder_attention_type == "local":
lowerCamelCase_ = 'LocalSelfAttention'
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowerCamelCase_ = 'TransientGlobalSelfAttention'
else:
raise ValueError(
            'Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5\'` with `encoder_attention_type`'
            ' attribute with a value from [\'local\', \'transient-global\'].' )
# Encoder
for layer_index in range(config.num_layers ):
lowerCamelCase_ = f"""layers_{str(lowercase )}"""
# Self-Attention
lowerCamelCase_ = tax_model['target']['encoder'][layer_name]['attention']['key']['kernel']
lowerCamelCase_ = tax_model['target']['encoder'][layer_name]['attention']['out']['kernel']
lowerCamelCase_ = tax_model['target']['encoder'][layer_name]['attention']['query']['kernel']
lowerCamelCase_ = tax_model['target']['encoder'][layer_name]['attention']['value']['kernel']
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowerCamelCase_ = tax_model['target']['encoder'][layer_name]['attention']['T5LayerNorm_0']['scale']
# Layer Normalization
lowerCamelCase_ = tax_model['target']['encoder'][layer_name]['pre_attention_layer_norm']['scale']
if split_mlp_wi:
lowerCamelCase_ = tax_model['target']['encoder'][layer_name]['mlp']['wi_0']['kernel']
lowerCamelCase_ = tax_model['target']['encoder'][layer_name]['mlp']['wi_1']['kernel']
else:
lowerCamelCase_ = tax_model['target']['encoder'][layer_name]['mlp']['wi']['kernel']
lowerCamelCase_ = tax_model['target']['encoder'][layer_name]['mlp']['wo']['kernel']
# Layer Normalization
lowerCamelCase_ = tax_model['target']['encoder'][layer_name]['pre_mlp_layer_norm']['scale']
# Assigning
lowerCamelCase_ = flax_model.params['encoder']['block'][str(lowercase )]['layer']
lowerCamelCase_ = tax_attention_key
lowerCamelCase_ = tax_attention_out
lowerCamelCase_ = tax_attention_query
lowerCamelCase_ = tax_attention_value
lowerCamelCase_ = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowerCamelCase_ = tax_global_layer_norm
if split_mlp_wi:
lowerCamelCase_ = tax_mlp_wi_a
lowerCamelCase_ = tax_mlp_wi_a
else:
lowerCamelCase_ = tax_mlp_wi
lowerCamelCase_ = tax_mlp_wo
lowerCamelCase_ = tax_mlp_layer_norm
lowerCamelCase_ = flax_model_encoder_layer_block
# Only for layer 0:
lowerCamelCase_ = tax_model['target']['encoder']['relpos_bias']['rel_embedding'].T
lowerCamelCase_ = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowerCamelCase_ = tax_model['target']['encoder']['side_relpos_bias']['rel_embedding'].T
lowerCamelCase_ = tax_encoder_global_rel_embedding
# Assigning
lowerCamelCase_ = tax_model['target']['encoder']['encoder_norm']['scale']
lowerCamelCase_ = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
lowerCamelCase_ = f"""layers_{str(lowercase )}"""
# Self-Attention
lowerCamelCase_ = tax_model['target']['decoder'][layer_name]['self_attention']['key']['kernel']
lowerCamelCase_ = tax_model['target']['decoder'][layer_name]['self_attention']['out']['kernel']
lowerCamelCase_ = tax_model['target']['decoder'][layer_name]['self_attention']['query']['kernel']
lowerCamelCase_ = tax_model['target']['decoder'][layer_name]['self_attention']['value']['kernel']
# Layer Normalization
lowerCamelCase_ = tax_model['target']['decoder'][layer_name]['pre_self_attention_layer_norm'][
'scale'
]
# Encoder-Decoder-Attention
lowerCamelCase_ = tax_model['target']['decoder'][layer_name]['encoder_decoder_attention']
lowerCamelCase_ = tax_enc_dec_attention_module['key']['kernel']
lowerCamelCase_ = tax_enc_dec_attention_module['out']['kernel']
lowerCamelCase_ = tax_enc_dec_attention_module['query']['kernel']
lowerCamelCase_ = tax_enc_dec_attention_module['value']['kernel']
# Layer Normalization
lowerCamelCase_ = tax_model['target']['decoder'][layer_name]['pre_cross_attention_layer_norm']['scale']
# MLP
if split_mlp_wi:
lowerCamelCase_ = tax_model['target']['decoder'][layer_name]['mlp']['wi_0']['kernel']
lowerCamelCase_ = tax_model['target']['decoder'][layer_name]['mlp']['wi_1']['kernel']
else:
lowerCamelCase_ = tax_model['target']['decoder'][layer_name]['mlp']['wi']['kernel']
lowerCamelCase_ = tax_model['target']['decoder'][layer_name]['mlp']['wo']['kernel']
# Layer Normalization
lowerCamelCase_ = tax_model['target']['decoder'][layer_name]['pre_mlp_layer_norm']['scale']
# Assigning
lowerCamelCase_ = flax_model.params['decoder']['block'][str(lowercase )]['layer']
lowerCamelCase_ = tax_attention_key
lowerCamelCase_ = tax_attention_out
lowerCamelCase_ = tax_attention_query
lowerCamelCase_ = tax_attention_value
lowerCamelCase_ = tax_pre_attention_layer_norm
lowerCamelCase_ = tax_enc_dec_attention_key
lowerCamelCase_ = tax_enc_dec_attention_out
lowerCamelCase_ = tax_enc_dec_attention_query
lowerCamelCase_ = tax_enc_dec_attention_value
lowerCamelCase_ = tax_cross_layer_norm
if split_mlp_wi:
lowerCamelCase_ = tax_mlp_wi_a
lowerCamelCase_ = tax_mlp_wi_a
else:
lowerCamelCase_ = tax_mlp_wi
lowerCamelCase_ = tax_mlp_wo
        lowerCamelCase_ = tax_mlp_layer_norm
lowerCamelCase_ = flax_model_decoder_layer_block
# Decoder Normalization
lowerCamelCase_ = tax_model['target']['decoder']['decoder_norm']['scale']
    lowerCamelCase_ = tax_decoder_norm
# Only for layer 0:
lowerCamelCase_ = tax_model['target']['decoder']['relpos_bias']['rel_embedding'].T
lowerCamelCase_ = tax_decoder_rel_embedding
# Token Embeddings
lowerCamelCase_ = tax_model['target']['token_embedder']['embedding']
    lowerCamelCase_ = tax_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
lowerCamelCase_ = tax_model['target']['decoder']['logits_dense']['kernel']
flax_model.save_pretrained(lowercase )
    print('T5X Model was successfully converted!' )
if __name__ == "__main__":
lowerCamelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--t5x_checkpoint_path", default=None, type=str, required=True, help="Path the T5X checkpoint."
)
parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.")
parser.add_argument(
"--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model."
)
lowerCamelCase : Dict = parser.parse_args()
convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
| 651
|
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=UpperCamelCase )
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = field(default='''language-modeling''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
UpperCamelCase = Features({'''text''': Value('''string''' )} )
UpperCamelCase = Features({} )
UpperCamelCase = "text"
@property
def a__ ( self : List[Any] ) -> Dict[str, str]:
"""simple docstring"""
return {self.text_column: "text"}
| 651
| 1
|
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase : Dict = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")
@require_sentencepiece
@require_tokenizers
class A( UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = GPTSwaTokenizer
UpperCamelCase = False
UpperCamelCase = True
UpperCamelCase = False
def a__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase_ = GPTSwaTokenizer(A_ , eos_token='<unk>' , bos_token='<unk>' , pad_token='<unk>' )
tokenizer.save_pretrained(self.tmpdirname )
def a__ ( self : str , A_ : Union[str, Any] ) -> str:
"""simple docstring"""
lowerCamelCase_ = 'This is a test'
lowerCamelCase_ = 'This is a test'
return input_text, output_text
def a__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
lowerCamelCase_ = '<s>'
lowerCamelCase_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A_ ) , A_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A_ ) , A_ )
def a__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
lowerCamelCase_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<unk>' )
self.assertEqual(vocab_keys[1] , '<s>' )
self.assertEqual(vocab_keys[-1] , 'j' )
self.assertEqual(len(A_ ) , 2000 )
def a__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 2000 )
def a__ ( self : List[str] ) -> int:
"""simple docstring"""
lowerCamelCase_ = GPTSwaTokenizer(A_ )
lowerCamelCase_ = tokenizer.tokenize('This is a test' )
self.assertListEqual(A_ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , [465, 287, 265, 631, 842] )
lowerCamelCase_ = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
# fmt: off
self.assertListEqual(
A_ , ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] , )
# fmt: on
lowerCamelCase_ = tokenizer.convert_tokens_to_ids(A_ )
self.assertListEqual(
A_ , [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , )
lowerCamelCase_ = tokenizer.convert_ids_to_tokens(A_ )
# fmt: off
self.assertListEqual(
A_ , ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] )
# fmt: on
def a__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = GPTSwaTokenizer(A_ )
lowerCamelCase_ = ['This is a test', 'I was born in 92000, and this is falsé.']
lowerCamelCase_ = [
[465, 287, 265, 631, 842],
[262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(A_ , A_ ):
self.assertListEqual(tokenizer.encode_fast(A_ ) , A_ )
# Test that decode_fast returns the input text
for text, token_ids in zip(A_ , A_ ):
self.assertEqual(tokenizer.decode_fast(A_ ) , A_ )
@slow
def a__ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = [
'<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')',
'Hey there, how are you doing this fine day?',
'This is a text with a trailing spaces followed by a dot .',
'Häj sväjs lillebrör! =)',
'Det är inget fel på Mr. Cool',
]
# fmt: off
lowerCamelCase_ = {'input_ids': [[63423, 5, 6811, 14954, 282, 816, 3821, 63466, 63425, 63462, 18, 63978, 678, 301, 1320, 63423, 63455, 63458, 18, 63982, 4246, 3940, 1901, 47789, 5547, 18994], [19630, 1100, 63446, 1342, 633, 544, 4488, 593, 5102, 2416, 63495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 58593, 22413, 9106, 546, 268, 33213, 63979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55130, 63450, 924, 63449, 2249, 4062, 1558, 318, 63504, 21498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 63443, 26801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A_ , model_name='AI-Sweden/gpt-sw3-126m' , sequences=A_ , )
| 651
|
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = '''new-model'''
if is_tf_available():
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = NewModelConfig
@require_tf
class A( unittest.TestCase ):
'''simple docstring'''
@slow
def a__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = 'bert-base-cased'
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = 'bert-base-cased'
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForPreTraining.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForCausalLM.from_pretrained(A_ )
lowerCamelCase_ , lowerCamelCase_ = TFAutoModelForCausalLM.from_pretrained(A_ , output_loading_info=A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelWithLMHead.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : int ) -> str:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForMaskedLM.from_pretrained(A_ )
lowerCamelCase_ , lowerCamelCase_ = TFAutoModelForMaskedLM.from_pretrained(A_ , output_loading_info=A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : Any ) -> List[Any]:
"""simple docstring"""
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained(A_ )
lowerCamelCase_ , lowerCamelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained(A_ , output_loading_info=A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForSequenceClassification.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : List[Any] ) -> Any:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForQuestionAnswering.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
@require_tensorflow_probability
def a__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForTableQuestionAnswering.from_pretrained(A_ )
lowerCamelCase_ , lowerCamelCase_ = TFAutoModelForTableQuestionAnswering.from_pretrained(
A_ , output_loading_info=A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
def a__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = TFAutoModelWithLMHead.from_pretrained(A_ )
self.assertIsInstance(A_ , A_ )
self.assertEqual(model.num_parameters() , 14410 )
self.assertEqual(model.num_parameters(only_trainable=A_ ) , 14410 )
def a__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = TFAutoModelWithLMHead.from_pretrained(A_ )
self.assertIsInstance(A_ , A_ )
self.assertEqual(model.num_parameters() , 14410 )
self.assertEqual(model.num_parameters(only_trainable=A_ ) , 14410 )
def a__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ = TFAutoModel.from_pretrained('sgugger/funnel-random-tiny' )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = copy.deepcopy(model.config )
lowerCamelCase_ = ['FunnelBaseModel']
lowerCamelCase_ = TFAutoModel.from_config(A_ )
self.assertIsInstance(A_ , A_ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(A_ )
lowerCamelCase_ = TFAutoModel.from_pretrained(A_ )
self.assertIsInstance(A_ , A_ )
def a__ ( self : Any ) -> Tuple:
"""simple docstring"""
try:
AutoConfig.register('new-model' , A_ )
lowerCamelCase_ = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(A_ ):
auto_class.register(A_ , A_ )
auto_class.register(A_ , A_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(A_ ):
auto_class.register(A_ , A_ )
# Now that the config is registered, it can be used as any other config with the auto-API
lowerCamelCase_ = BertModelTester(self ).get_config()
lowerCamelCase_ = NewModelConfig(**tiny_config.to_dict() )
lowerCamelCase_ = auto_class.from_config(A_ )
self.assertIsInstance(A_ , A_ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(A_ )
lowerCamelCase_ = auto_class.from_pretrained(A_ )
self.assertIsInstance(A_ , A_ )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def a__ ( self : int ) -> int:
"""simple docstring"""
with self.assertRaisesRegex(
A_ , 'bert-base is not a local folder and is not a valid model identifier' ):
lowerCamelCase_ = TFAutoModel.from_pretrained('bert-base' )
def a__ ( self : Any ) -> Dict:
"""simple docstring"""
with self.assertRaisesRegex(
A_ , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
lowerCamelCase_ = TFAutoModel.from_pretrained(A_ , revision='aaaaaa' )
def a__ ( self : str ) -> int:
"""simple docstring"""
with self.assertRaisesRegex(
A_ , 'hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin' , ):
lowerCamelCase_ = TFAutoModel.from_pretrained('hf-internal-testing/config-no-model' )
def a__ ( self : Any ) -> List[Any]:
"""simple docstring"""
with self.assertRaisesRegex(A_ , 'Use `from_pt=True` to load this model' ):
lowerCamelCase_ = TFAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only' )
def a__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' )
with RequestCounter() as counter:
lowerCamelCase_ = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
lowerCamelCase_ = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' )
with RequestCounter() as counter:
lowerCamelCase_ = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| 651
| 1
|
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
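# Project Euler 48: find the last ten digits of 1**1 + 2**2 + ... + 1000**1000.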
lowerCamelCase_ = 0
for i in range(1 , 10_01 ):
total += i**i
return str(lowercase )[-10:]
if __name__ == "__main__":
print(solution())
| 651
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : str = logging.get_logger(__name__)
lowerCamelCase : List[str] = {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = '''gpt_neox_japanese'''
def __init__( self : int , A_ : Dict=32000 , A_ : List[Any]=2560 , A_ : Dict=32 , A_ : Union[str, Any]=32 , A_ : List[Any]=4 , A_ : List[str]="gelu" , A_ : Dict=1.00 , A_ : int=10000 , A_ : Dict=2048 , A_ : Dict=0.02 , A_ : Any=1E-5 , A_ : Union[str, Any]=True , A_ : int=31996 , A_ : List[str]=31999 , A_ : List[Any]=0.1 , A_ : List[Any]=0.0 , **A_ : Tuple , ) -> Dict:
"""simple docstring"""
super().__init__(bos_token_id=A_ , eos_token_id=A_ , **A_ )
lowerCamelCase_ = vocab_size
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_multiple_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = rotary_pct
lowerCamelCase_ = rotary_emb_base
lowerCamelCase_ = initializer_range
lowerCamelCase_ = layer_norm_eps
lowerCamelCase_ = use_cache
lowerCamelCase_ = attention_dropout
lowerCamelCase_ = hidden_dropout
| 651
| 1
|
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
lowerCamelCase : List[str] = TypeVar("KEY")
lowerCamelCase : str = TypeVar("VAL")
@dataclass(frozen=UpperCamelCase , slots=UpperCamelCase )
class A( Generic[KEY, VAL] ):
'''simple docstring'''
UpperCamelCase = 42
UpperCamelCase = 42
class A( _Item ):
'''simple docstring'''
def __init__( self : Tuple ) -> None:
"""simple docstring"""
super().__init__(A_ , A_ )
def __bool__( self : List[Any] ) -> bool:
"""simple docstring"""
return False
lowerCamelCase : Optional[Any] = _DeletedItem()
class A( MutableMapping[KEY, VAL] ):
'''simple docstring'''
def __init__( self : str , A_ : int = 8 , A_ : float = 0.75 ) -> None:
"""simple docstring"""
lowerCamelCase_ = initial_block_size
lowerCamelCase_ = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
lowerCamelCase_ = capacity_factor
lowerCamelCase_ = 0
def a__ ( self : Dict , A_ : KEY ) -> int:
"""simple docstring"""
return hash(A_ ) % len(self._buckets )
def a__ ( self : List[Any] , A_ : int ) -> int:
"""simple docstring"""
return (ind + 1) % len(self._buckets )
def a__ ( self : Tuple , A_ : int , A_ : KEY , A_ : VAL ) -> bool:
"""simple docstring"""
lowerCamelCase_ = self._buckets[ind]
if not stored:
lowerCamelCase_ = _Item(A_ , A_ )
self._len += 1
return True
elif stored.key == key:
lowerCamelCase_ = _Item(A_ , A_ )
return True
else:
return False
def a__ ( self : List[str] ) -> bool:
"""simple docstring"""
lowerCamelCase_ = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(A_ )
def a__ ( self : Dict ) -> bool:
"""simple docstring"""
if len(self._buckets ) <= self._initial_block_size:
return False
lowerCamelCase_ = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def a__ ( self : Any , A_ : int ) -> None:
"""simple docstring"""
lowerCamelCase_ = self._buckets
lowerCamelCase_ = [None] * new_size
lowerCamelCase_ = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def a__ ( self : Tuple ) -> None:
"""simple docstring"""
self._resize(len(self._buckets ) * 2 )
def a__ ( self : Any ) -> None:
"""simple docstring"""
self._resize(len(self._buckets ) // 2 )
def a__ ( self : Tuple , A_ : KEY ) -> Iterator[int]:
"""simple docstring"""
lowerCamelCase_ = self._get_bucket_index(A_ )
for _ in range(len(self._buckets ) ):
yield ind
lowerCamelCase_ = self._get_next_ind(A_ )
def a__ ( self : int , A_ : KEY , A_ : VAL ) -> None:
"""simple docstring"""
for ind in self._iterate_buckets(A_ ):
if self._try_set(A_ , A_ , A_ ):
break
def __setitem__( self : List[str] , A_ : KEY , A_ : VAL ) -> None:
"""simple docstring"""
if self._is_full():
self._size_up()
self._add_item(A_ , A_ )
def __delitem__( self : int , A_ : KEY ) -> None:
"""simple docstring"""
for ind in self._iterate_buckets(A_ ):
lowerCamelCase_ = self._buckets[ind]
if item is None:
raise KeyError(A_ )
if item is _deleted:
continue
if item.key == key:
lowerCamelCase_ = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self : Optional[int] , A_ : KEY ) -> VAL:
"""simple docstring"""
for ind in self._iterate_buckets(A_ ):
lowerCamelCase_ = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(A_ )
def __len__( self : int ) -> int:
"""simple docstring"""
return self._len
def __iter__( self : List[Any] ) -> Iterator[KEY]:
"""simple docstring"""
yield from (item.key for item in self._buckets if item)
def __repr__( self : Dict ) -> str:
"""simple docstring"""
lowerCamelCase_ = ', '.join(
f"""{item.key}: {item.val}""" for item in self._buckets if item )
return f"""HashMap({val_string})"""
| 651
|
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
lowerCamelCase : List[Any] = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
"text-classification",
"language-modeling",
"summarization",
"token-classification",
"question-answering",
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
lowerCamelCase : Tuple = logging.getLogger()
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = argparse.ArgumentParser()
parser.add_argument('-f' )
lowerCamelCase_ = parser.parse_args()
return args.f
def _SCREAMING_SNAKE_CASE ( lowercase : Optional[Any] , lowercase : Dict="eval" ):
'''simple docstring'''
lowerCamelCase_ = os.path.join(lowercase , f"""{split}_results.json""" )
if os.path.exists(lowercase ):
with open(lowercase , 'r' ) as f:
return json.load(lowercase )
raise ValueError(f"""can't find {path}""" )
lowerCamelCase : str = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class A( UpperCamelCase ):
'''simple docstring'''
def a__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = self.get_auto_remove_tmp_dir()
lowerCamelCase_ = f"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
with patch.object(A_ , 'argv' , A_ ):
run_flax_glue.main()
lowerCamelCase_ = get_results(A_ )
self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )
@slow
def a__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = self.get_auto_remove_tmp_dir()
lowerCamelCase_ = f"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(A_ , 'argv' , A_ ):
run_clm_flax.main()
lowerCamelCase_ = get_results(A_ )
self.assertLess(result['eval_perplexity'] , 100 )
@slow
def a__ ( self : str ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = self.get_auto_remove_tmp_dir()
lowerCamelCase_ = f"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
with patch.object(A_ , 'argv' , A_ ):
run_summarization_flax.main()
lowerCamelCase_ = get_results(A_ , split='test' )
self.assertGreaterEqual(result['test_rouge1'] , 10 )
self.assertGreaterEqual(result['test_rouge2'] , 2 )
self.assertGreaterEqual(result['test_rougeL'] , 7 )
self.assertGreaterEqual(result['test_rougeLsum'] , 7 )
@slow
def a__ ( self : Optional[int] ) -> str:
"""simple docstring"""
lowerCamelCase_ = self.get_auto_remove_tmp_dir()
lowerCamelCase_ = f"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
with patch.object(A_ , 'argv' , A_ ):
run_mlm_flax.main()
lowerCamelCase_ = get_results(A_ )
self.assertLess(result['eval_perplexity'] , 42 )
@slow
def a__ ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = self.get_auto_remove_tmp_dir()
lowerCamelCase_ = f"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(A_ , 'argv' , A_ ):
run_ta_mlm_flax.main()
lowerCamelCase_ = get_results(A_ )
self.assertGreaterEqual(result['eval_accuracy'] , 0.42 )
@slow
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = 7 if get_gpu_count() > 1 else 2
lowerCamelCase_ = self.get_auto_remove_tmp_dir()
lowerCamelCase_ = f"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
with patch.object(A_ , 'argv' , A_ ):
run_flax_ner.main()
lowerCamelCase_ = get_results(A_ )
self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )
self.assertGreaterEqual(result['eval_f1'] , 0.3 )
@slow
def a__ ( self : str ) -> int:
"""simple docstring"""
lowerCamelCase_ = self.get_auto_remove_tmp_dir()
lowerCamelCase_ = f"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
with patch.object(A_ , 'argv' , A_ ):
run_qa.main()
lowerCamelCase_ = get_results(A_ )
self.assertGreaterEqual(result['eval_f1'] , 30 )
self.assertGreaterEqual(result['eval_exact'] , 30 )
| 651
| 1
|
def _SCREAMING_SNAKE_CASE ( lowercase : int = 10_00 ):
'''simple docstring'''
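# Project Euler 16: sum the decimal digits of 2**power (2**1000 by default).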
lowerCamelCase_ = 2**power
lowerCamelCase_ = str(lowercase )
lowerCamelCase_ = list(lowercase )
lowerCamelCase_ = 0
for i in list_num:
sum_of_num += int(lowercase )
return sum_of_num
if __name__ == "__main__":
lowerCamelCase : int = int(input("Enter the power of 2: ").strip())
print("2 ^ ", power, " = ", 2**power)
lowerCamelCase : Optional[Any] = solution(power)
print("Sum of the digits is: ", result)
| 651
|
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class A:
'''simple docstring'''
UpperCamelCase = 42
UpperCamelCase = None
UpperCamelCase = None
lowerCamelCase : str = namedtuple("CoinsDistribResult", "moves excess")
def _SCREAMING_SNAKE_CASE ( lowercase : TreeNode | None ):
'''simple docstring'''
if root is None:
return 0
# Validation
def count_nodes(lowercase : TreeNode | None ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(lowercase : TreeNode | None ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(lowercase ) != count_coins(lowercase ):
raise ValueError('The nodes number should be same as the number of coins' )
# Main calculation
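# Each subtree reports how many coins it must pass to (or pull from) its
# parent; every coin crossing an edge costs one move, hence the abs() terms
# below ("Distribute Coins in Binary Tree").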
def get_distrib(lowercase : TreeNode | None ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
lowerCamelCase_ , lowerCamelCase_ = get_distrib(node.left )
lowerCamelCase_ , lowerCamelCase_ = get_distrib(node.right )
lowerCamelCase_ = 1 - left_distrib_excess
lowerCamelCase_ = 1 - right_distrib_excess
lowerCamelCase_ = (
left_distrib_moves
+ right_distrib_moves
+ abs(lowercase )
+ abs(lowercase )
)
lowerCamelCase_ = node.data - coins_to_left - coins_to_right
return CoinsDistribResult(lowercase , lowercase )
return get_distrib(lowercase )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 651
| 1
|
import requests
lowerCamelCase : List[Any] = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="
def _SCREAMING_SNAKE_CASE ( lowercase : str ):
'''simple docstring'''
lowerCamelCase_ = requests.get(_NEWS_API + bbc_news_api_key ).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page['articles'] , 1 ):
print(f"""{i}.) {article["title"]}""" )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
| 651
|
from manim import *
class A( UpperCamelCase ):
'''simple docstring'''
def a__ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = Rectangle(height=0.5 , width=0.5 )
lowerCamelCase_ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
lowerCamelCase_ = Rectangle(height=0.25 , width=0.25 )
lowerCamelCase_ = [mem.copy() for i in range(6 )]
lowerCamelCase_ = [mem.copy() for i in range(6 )]
lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = VGroup(A_ , A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = Text('CPU' , font_size=24 )
lowerCamelCase_ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(A_ )
lowerCamelCase_ = [mem.copy() for i in range(4 )]
lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = Text('GPU' , font_size=24 )
lowerCamelCase_ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
gpu.move_to([-1, -1, 0] )
self.add(A_ )
lowerCamelCase_ = [mem.copy() for i in range(6 )]
lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = Text('Model' , font_size=24 )
lowerCamelCase_ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
model.move_to([3, -1.0, 0] )
self.add(A_ )
lowerCamelCase_ = []
lowerCamelCase_ = []
for i, rect in enumerate(A_ ):
lowerCamelCase_ = fill.copy().set_fill(A_ , opacity=0.8 )
target.move_to(A_ )
model_arr.append(A_ )
lowerCamelCase_ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(A_ , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(A_ )
self.add(*A_ , *A_ )
lowerCamelCase_ = [meta_mem.copy() for i in range(6 )]
lowerCamelCase_ = [meta_mem.copy() for i in range(6 )]
lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = VGroup(A_ , A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = Text('Disk' , font_size=24 )
lowerCamelCase_ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
disk.move_to([-4, -1.25, 0] )
self.add(A_ , A_ )
lowerCamelCase_ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCamelCase_ = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(A_ , A_ )
lowerCamelCase_ = MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(A_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(A_ )
lowerCamelCase_ = MarkupText(
f"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(A_ ) )
lowerCamelCase_ = Square(0.3 )
input.set_fill(A_ , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , A_ , buff=0.5 )
self.play(Write(A_ ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=A_ , buff=0.02 )
self.play(MoveToTarget(A_ ) )
self.play(FadeOut(A_ ) )
lowerCamelCase_ = Arrow(start=A_ , end=A_ , color=A_ , buff=0.5 )
a.next_to(model_arr[0].get_left() , A_ , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
lowerCamelCase_ = MarkupText(
f"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(A_ , run_time=3 ) )
lowerCamelCase_ = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(A_ ) , Circumscribe(model_arr[0] , color=A_ , **A_ ) , Circumscribe(model_cpu_arr[0] , color=A_ , **A_ ) , Circumscribe(gpu_rect[0] , color=A_ , **A_ ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
lowerCamelCase_ = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , A_ , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
lowerCamelCase_ = AnimationGroup(
FadeOut(A_ , run_time=0.5 ) , MoveToTarget(A_ , run_time=0.5 ) , FadeIn(A_ , run_time=0.5 ) , lag_ratio=0.2 )
self.play(A_ )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
lowerCamelCase_ = 0.7
self.play(
Circumscribe(model_arr[i] , **A_ ) , Circumscribe(cpu_left_col_base[i] , **A_ ) , Circumscribe(cpu_left_col_base[i + 1] , color=A_ , **A_ ) , Circumscribe(gpu_rect[0] , color=A_ , **A_ ) , Circumscribe(model_arr[i + 1] , color=A_ , **A_ ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=A_ , **A_ ) , Circumscribe(cpu_left_col_base[-1] , color=A_ , **A_ ) , Circumscribe(gpu_rect[0] , color=A_ , **A_ ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
lowerCamelCase_ = a_c
lowerCamelCase_ = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(A_ ) , FadeOut(A_ , run_time=0.5 ) , )
lowerCamelCase_ = MarkupText(f"""Inference on a model too large for GPU memory\nis successfully completed.""" , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(A_ , run_time=3 ) , MoveToTarget(A_ ) )
self.wait()
| 651
| 1
|
import pickle
import numpy as np
from matplotlib import pyplot as plt
class A:
'''simple docstring'''
def __init__( self : Tuple , A_ : Dict , A_ : Dict , A_ : Union[str, Any] , A_ : Optional[int] , A_ : Optional[Any] , A_ : Union[str, Any]=0.2 , A_ : List[Any]=0.2 ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = bp_numa
lowerCamelCase_ = bp_numa
lowerCamelCase_ = bp_numa
lowerCamelCase_ = conva_get[:2]
lowerCamelCase_ = conva_get[2]
lowerCamelCase_ = size_pa
lowerCamelCase_ = rate_w
lowerCamelCase_ = rate_t
lowerCamelCase_ = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
lowerCamelCase_ = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
lowerCamelCase_ = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
lowerCamelCase_ = -2 * np.random.rand(self.conva[1] ) + 1
lowerCamelCase_ = -2 * np.random.rand(self.num_bpa ) + 1
lowerCamelCase_ = -2 * np.random.rand(self.num_bpa ) + 1
def a__ ( self : Optional[Any] , A_ : int ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ = {
'num_bp1': self.num_bpa,
'num_bp2': self.num_bpa,
'num_bp3': self.num_bpa,
'conv1': self.conva,
'step_conv1': self.step_conva,
'size_pooling1': self.size_poolinga,
'rate_weight': self.rate_weight,
'rate_thre': self.rate_thre,
'w_conv1': self.w_conva,
'wkj': self.wkj,
'vji': self.vji,
'thre_conv1': self.thre_conva,
'thre_bp2': self.thre_bpa,
'thre_bp3': self.thre_bpa,
}
with open(A_ , 'wb' ) as f:
pickle.dump(A_ , A_ )
print(f"""Model saved: {save_path}""" )
@classmethod
def a__ ( cls : int , A_ : Optional[int] ) -> List[str]:
"""simple docstring"""
with open(A_ , 'rb' ) as f:
lowerCamelCase_ = pickle.load(A_ ) # noqa: S301
lowerCamelCase_ = model_dic.get('conv1' )
conv_get.append(model_dic.get('step_conv1' ) )
lowerCamelCase_ = model_dic.get('size_pooling1' )
lowerCamelCase_ = model_dic.get('num_bp1' )
lowerCamelCase_ = model_dic.get('num_bp2' )
lowerCamelCase_ = model_dic.get('num_bp3' )
lowerCamelCase_ = model_dic.get('rate_weight' )
lowerCamelCase_ = model_dic.get('rate_thre' )
# create model instance
lowerCamelCase_ = CNN(A_ , A_ , A_ , A_ , A_ , A_ , A_ )
# modify model parameters
lowerCamelCase_ = model_dic.get('w_conv1' )
lowerCamelCase_ = model_dic.get('wkj' )
lowerCamelCase_ = model_dic.get('vji' )
lowerCamelCase_ = model_dic.get('thre_conv1' )
lowerCamelCase_ = model_dic.get('thre_bp2' )
lowerCamelCase_ = model_dic.get('thre_bp3' )
return conv_ins
def a__ ( self : Optional[int] , A_ : Any ) -> List[str]:
"""simple docstring"""
return 1 / (1 + np.exp(-1 * x ))
def a__ ( self : Any , A_ : Any ) -> Optional[Any]:
"""simple docstring"""
return round(A_ , 3 )
def a__ ( self : int , A_ : Optional[int] , A_ : List[Any] , A_ : Optional[int] , A_ : Optional[Any] , A_ : List[Any] ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = convs[0]
lowerCamelCase_ = convs[1]
lowerCamelCase_ = np.shape(A_ )[0]
# get the data slice of original image data, data_focus
lowerCamelCase_ = []
for i_focus in range(0 , size_data - size_conv + 1 , A_ ):
for j_focus in range(0 , size_data - size_conv + 1 , A_ ):
lowerCamelCase_ = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(A_ )
# calculate the feature map of every kernel and save it as a list of matrices
lowerCamelCase_ = []
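# Each feature map is square with side floor((size_data - size_conv) / conv_step) + 1.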
lowerCamelCase_ = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(A_ ):
lowerCamelCase_ = []
for i_focus in range(len(A_ ) ):
lowerCamelCase_ = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(A_ ) )
lowerCamelCase_ = np.asmatrix(A_ ).reshape(
A_ , A_ )
data_featuremap.append(A_ )
# expand the data slices to one dimension
lowerCamelCase_ = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(A_ ) )
lowerCamelCase_ = np.asarray(A_ )
return focus_list, data_featuremap
def a__ ( self : Tuple , A_ : int , A_ : List[str] , A_ : str="average_pool" ) -> int:
"""simple docstring"""
lowerCamelCase_ = len(featuremaps[0] )
lowerCamelCase_ = int(size_map / size_pooling )
lowerCamelCase_ = []
for i_map in range(len(A_ ) ):
lowerCamelCase_ = featuremaps[i_map]
lowerCamelCase_ = []
for i_focus in range(0 , A_ , A_ ):
for j_focus in range(0 , A_ , A_ ):
lowerCamelCase_ = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(A_ ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(A_ ) )
lowerCamelCase_ = np.asmatrix(A_ ).reshape(A_ , A_ )
featuremap_pooled.append(A_ )
return featuremap_pooled
def a__ ( self : List[str] , A_ : Optional[Any] ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = []
for i in range(len(A_ ) ):
lowerCamelCase_ = np.shape(data[i] )
lowerCamelCase_ = data[i].reshape(1 , shapes[0] * shapes[1] )
lowerCamelCase_ = data_listed.getA().tolist()[0]
data_expanded.extend(A_ )
lowerCamelCase_ = np.asarray(A_ )
return data_expanded
def a__ ( self : Optional[Any] , A_ : Dict ) -> int:
"""simple docstring"""
lowerCamelCase_ = np.asarray(A_ )
lowerCamelCase_ = np.shape(A_ )
lowerCamelCase_ = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def a__ ( self : Union[str, Any] , A_ : Optional[Any] , A_ : Optional[Any] , A_ : int , A_ : int , A_ : str ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = []
lowerCamelCase_ = 0
for i_map in range(A_ ):
lowerCamelCase_ = np.ones((size_map, size_map) )
for i in range(0 , A_ , A_ ):
for j in range(0 , A_ , A_ ):
lowerCamelCase_ = pd_pool[
i_pool
]
lowerCamelCase_ = i_pool + 1
lowerCamelCase_ = np.multiply(
A_ , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(A_ )
return pd_all
def a__ ( self : Dict , A_ : int , A_ : Dict , A_ : Dict , A_ : int , A_ : Tuple , A_ : Dict=bool ) -> int:
"""simple docstring"""
print('----------------------Start Training-------------------------' )
print((' - - Shape: Train_Data ', np.shape(A_ )) )
print((' - - Shape: Teach_Data ', np.shape(A_ )) )
lowerCamelCase_ = 0
lowerCamelCase_ = []
lowerCamelCase_ = 10000
while rp < n_repeat and mse >= error_accuracy:
lowerCamelCase_ = 0
print(f"""-------------Learning Time {rp}--------------""" )
for p in range(len(A_ ) ):
# print('------------Learning Image: %d--------------'%p)
lowerCamelCase_ = np.asmatrix(datas_train[p] )
lowerCamelCase_ = np.asarray(datas_teach[p] )
lowerCamelCase_ , lowerCamelCase_ = self.convolute(
A_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
lowerCamelCase_ = self.pooling(A_ , self.size_poolinga )
lowerCamelCase_ = np.shape(A_ )
lowerCamelCase_ = self._expand(A_ )
lowerCamelCase_ = data_bp_input
lowerCamelCase_ = np.dot(A_ , self.vji.T ) - self.thre_bpa
lowerCamelCase_ = self.sig(A_ )
lowerCamelCase_ = np.dot(A_ , self.wkj.T ) - self.thre_bpa
lowerCamelCase_ = self.sig(A_ )
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
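# Output-layer delta uses the sigmoid derivative out * (1 - out); the error is
# then propagated back through wkj and vji and finally spread over the pooled
# convolution outputs.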
lowerCamelCase_ = np.multiply(
(data_teach - bp_outa) , np.multiply(A_ , (1 - bp_outa) ) )
lowerCamelCase_ = np.multiply(
np.dot(A_ , self.wkj ) , np.multiply(A_ , (1 - bp_outa) ) )
lowerCamelCase_ = np.dot(A_ , self.vji )
lowerCamelCase_ = pd_i_all / (self.size_poolinga * self.size_poolinga)
lowerCamelCase_ = pd_conva_pooled.T.getA().tolist()
lowerCamelCase_ = self._calculate_gradient_from_pool(
A_ , A_ , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
lowerCamelCase_ = self._expand_mat(pd_conva_all[k_conv] )
lowerCamelCase_ = self.rate_weight * np.dot(A_ , A_ )
lowerCamelCase_ = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
lowerCamelCase_ = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
lowerCamelCase_ = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
lowerCamelCase_ = self.vji + pd_j_all.T * bp_outa * self.rate_weight
lowerCamelCase_ = self.thre_bpa - pd_k_all * self.rate_thre
lowerCamelCase_ = self.thre_bpa - pd_j_all * self.rate_thre
# accumulate the total error over all training images
lowerCamelCase_ = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
lowerCamelCase_ = rp + 1
lowerCamelCase_ = error_count / patterns
all_mse.append(A_ )
def draw_error():
lowerCamelCase_ = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(A_ , '+-' )
plt.plot(A_ , 'r--' )
plt.xlabel('Learning Times' )
plt.ylabel('All_mse' )
plt.grid(A_ , alpha=0.5 )
plt.show()
print('------------------Training Completed---------------------' )
print((' - - Training epoch: ', rp, f""" - - Mse: {mse:.6f}""") )
if draw_e:
draw_error()
return mse
def a__ ( self : Any , A_ : str ) -> int:
"""simple docstring"""
lowerCamelCase_ = []
print('-------------------Start Testing-------------------------' )
print((' - - Shape: Test_Data ', np.shape(A_ )) )
for p in range(len(A_ ) ):
lowerCamelCase_ = np.asmatrix(datas_test[p] )
lowerCamelCase_ , lowerCamelCase_ = self.convolute(
A_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
lowerCamelCase_ = self.pooling(A_ , self.size_poolinga )
lowerCamelCase_ = self._expand(A_ )
lowerCamelCase_ = data_bp_input
lowerCamelCase_ = bp_outa * self.vji.T - self.thre_bpa
lowerCamelCase_ = self.sig(A_ )
lowerCamelCase_ = bp_outa * self.wkj.T - self.thre_bpa
lowerCamelCase_ = self.sig(A_ )
produce_out.extend(bp_outa.getA().tolist() )
lowerCamelCase_ = [list(map(self.do_round , A_ ) ) for each in produce_out]
return np.asarray(A_ )
def a__ ( self : Dict , A_ : str ) -> Any:
"""simple docstring"""
lowerCamelCase_ = np.asmatrix(A_ )
lowerCamelCase_ , lowerCamelCase_ = self.convolute(
A_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
lowerCamelCase_ = self.pooling(A_ , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
| 651
|
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def _SCREAMING_SNAKE_CASE ( lowercase : Optional[Any] ):
'''simple docstring'''
return EnvironmentCommand()
class A( UpperCamelCase ):
'''simple docstring'''
@staticmethod
def a__ ( A_ : ArgumentParser ) -> str:
"""simple docstring"""
lowerCamelCase_ = parser.add_parser('env' )
download_parser.set_defaults(func=A_ )
def a__ ( self : Optional[Any] ) -> Any:
"""simple docstring"""
lowerCamelCase_ = huggingface_hub.__version__
lowerCamelCase_ = 'not installed'
lowerCamelCase_ = 'NA'
if is_torch_available():
import torch
lowerCamelCase_ = torch.__version__
lowerCamelCase_ = torch.cuda.is_available()
lowerCamelCase_ = 'not installed'
if is_transformers_available():
import transformers
lowerCamelCase_ = transformers.__version__
lowerCamelCase_ = 'not installed'
if is_accelerate_available():
import accelerate
lowerCamelCase_ = accelerate.__version__
lowerCamelCase_ = 'not installed'
if is_xformers_available():
import xformers
lowerCamelCase_ = xformers.__version__
lowerCamelCase_ = {
'`diffusers` version': version,
'Platform': platform.platform(),
'Python version': platform.python_version(),
'PyTorch version (GPU?)': f"""{pt_version} ({pt_cuda_available})""",
'Huggingface_hub version': hub_version,
'Transformers version': transformers_version,
'Accelerate version': accelerate_version,
'xFormers version': xformers_version,
'Using GPU in script?': '<fill in>',
'Using distributed or parallel set-up in script?': '<fill in>',
}
print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n' )
print(self.format_dict(A_ ) )
return info
@staticmethod
def a__ ( A_ : Dict ) -> Any:
"""simple docstring"""
return "\n".join([f"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
| 651
| 1
|
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
lowerCamelCase : int = logging.get_logger(__name__)
class A( UpperCamelCase ):
'''simple docstring'''
def __init__( self : str , *A_ : Union[str, Any] , **A_ : Union[str, Any] ) -> None:
"""simple docstring"""
warnings.warn(
'The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use DPTImageProcessor instead.' , A_ , )
super().__init__(*A_ , **A_ )
| 651
|
from __future__ import annotations
from fractions import Fraction
def _SCREAMING_SNAKE_CASE ( lowercase : int , lowercase : int ):
'''simple docstring'''
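# A two-digit fraction such as 49/98 is "digit-cancelling" when striking the
# shared digit (the numerator's units digit and the denominator's tens digit)
# leaves an equal fraction: 4/8 == 49/98 (Project Euler 33).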
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def _SCREAMING_SNAKE_CASE ( lowercase : int ):
'''simple docstring'''
lowerCamelCase_ = []
lowerCamelCase_ = 11
lowerCamelCase_ = int('1' + '0' * digit_len )
for num in range(lowercase , lowercase ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(lowercase , lowercase ):
solutions.append(f"""{num}/{den}""" )
den += 1
num += 1
lowerCamelCase_ = 10
return solutions
def _SCREAMING_SNAKE_CASE ( lowercase : int = 2 ):
'''simple docstring'''
lowerCamelCase_ = 1.0
for fraction in fraction_list(lowercase ):
lowerCamelCase_ = Fraction(lowercase )
result *= frac.denominator / frac.numerator
return int(lowercase )
if __name__ == "__main__":
print(solution())
| 651
| 1
|
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from tax import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
lowerCamelCase : Any = "base_with_context"
def _SCREAMING_SNAKE_CASE ( lowercase : List[str] , lowercase : Union[str, Any] ):
'''simple docstring'''
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(weights['token_embedder']['embedding'] ) )
lowerCamelCase_ = nn.Parameter(
torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=lowercase )
for lyr_num, lyr in enumerate(model.encoders ):
lowerCamelCase_ = weights[f"""layers_{lyr_num}"""]
lowerCamelCase_ = nn.Parameter(
torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
lowerCamelCase_ = ly_weight['attention']
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
return model
def _SCREAMING_SNAKE_CASE ( lowercase : int , lowercase : Union[str, Any] ):
'''simple docstring'''
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(weights['input_proj']['kernel'].T ) )
lowerCamelCase_ = nn.Parameter(
torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=lowercase )
for lyr_num, lyr in enumerate(model.encoders ):
lowerCamelCase_ = weights[f"""layers_{lyr_num}"""]
lowerCamelCase_ = ly_weight['attention']
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
lowerCamelCase_ = nn.Parameter(
torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
return model
def _SCREAMING_SNAKE_CASE ( lowercase : List[Any] , lowercase : List[str] ):
'''simple docstring'''
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(weights['time_emb_dense0']['kernel'].T ) )
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(weights['time_emb_dense1']['kernel'].T ) )
lowerCamelCase_ = nn.Parameter(
torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=lowercase )
lowerCamelCase_ = nn.Parameter(
torch.FloatTensor(weights['continuous_inputs_projection']['kernel'].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
lowerCamelCase_ = weights[f"""layers_{lyr_num}"""]
lowerCamelCase_ = nn.Parameter(
torch.FloatTensor(ly_weight['pre_self_attention_layer_norm']['scale'] ) )
lowerCamelCase_ = nn.Parameter(
torch.FloatTensor(ly_weight['FiLMLayer_0']['DenseGeneral_0']['kernel'].T ) )
lowerCamelCase_ = ly_weight['self_attention']
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
lowerCamelCase_ = ly_weight['MultiHeadDotProductAttention_0']
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
lowerCamelCase_ = nn.Parameter(
torch.FloatTensor(ly_weight['pre_cross_attention_layer_norm']['scale'] ) )
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
lowerCamelCase_ = nn.Parameter(
torch.FloatTensor(ly_weight['FiLMLayer_1']['DenseGeneral_0']['kernel'].T ) )
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(weights['decoder_norm']['scale'] ) )
lowerCamelCase_ = nn.Parameter(torch.FloatTensor(weights['spec_out_dense']['kernel'].T ) )
return model
def _SCREAMING_SNAKE_CASE ( lowercase : Any ):
'''simple docstring'''
lowerCamelCase_ = checkpoints.load_tax_checkpoint(args.checkpoint_path )
lowerCamelCase_ = jnp.tree_util.tree_map(onp.array , lowercase )
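# Materialize every leaf of the checkpoint pytree as a plain numpy array so
# the weights can be wrapped in torch tensors during loading below.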
lowerCamelCase_ = [
'from __gin__ import dynamic_registration',
'from music_spectrogram_diffusion.models.diffusion import diffusion_utils',
'diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0',
'diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()',
]
lowerCamelCase_ = os.path.join(args.checkpoint_path , '..' , 'config.gin' )
lowerCamelCase_ = inference.parse_training_gin_file(lowercase , lowercase )
lowerCamelCase_ = inference.InferenceModel(args.checkpoint_path , lowercase )
lowerCamelCase_ = DDPMScheduler(beta_schedule='squaredcos_cap_v2' , variance_type='fixed_large' )
lowerCamelCase_ = SpectrogramNotesEncoder(
max_length=synth_model.sequence_length['inputs'] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , )
lowerCamelCase_ = SpectrogramContEncoder(
input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length['targets_context'] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , )
lowerCamelCase_ = TaFilmDecoder(
input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length['targets_context'] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
lowerCamelCase_ = load_notes_encoder(ta_checkpoint['target']['token_encoder'] , lowercase )
lowerCamelCase_ = load_continuous_encoder(ta_checkpoint['target']['continuous_encoder'] , lowercase )
lowerCamelCase_ = load_decoder(ta_checkpoint['target']['decoder'] , lowercase )
lowerCamelCase_ = OnnxRuntimeModel.from_pretrained('kashif/soundstream_mel_decoder' )
lowerCamelCase_ = SpectrogramDiffusionPipeline(
notes_encoder=lowercase , continuous_encoder=lowercase , decoder=lowercase , scheduler=lowercase , melgan=lowercase , )
if args.save:
pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--output_path", default=None, type=str, required=True, help="Path to the converted model.")
    parser.add_argument(
        "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
    )
    parser.add_argument(
        "--checkpoint_path",
        default=F"""{MODEL}/checkpoint_500000""",
        type=str,
        required=False,
        help="Path to the original jax model checkpoint.",
    )
    args = parser.parse_args()
    main(args)
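# A minimal, self-contained sketch (not part of the original script) of the
# weight-porting pattern used above: Flax `Dense` kernels are stored as
# (in_features, out_features), while `torch.nn.Linear.weight` expects
# (out_features, in_features), hence the `.T` on every copied kernel.
import numpy as np
import torch
from torch import nn

flax_kernel = np.random.randn(16, 32).astype("float32")  # (in, out) as saved by Flax
linear = nn.Linear(16, 32, bias=False)                    # torch stores weight as (32, 16)
linear.weight = nn.Parameter(torch.FloatTensor(flax_kernel.T))
x = torch.randn(1, 16)
expected = torch.from_numpy(x.numpy() @ flax_kernel)      # the Flax-side computation
assert torch.allclose(linear(x), expected, atol=1e-6)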
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowerCamelCase : List[Any] = logging.get_logger(__name__)
class A( BaseImageProcessor ):
    '''simple docstring'''
    model_input_names = ['''pixel_values''']
    def __init__( self : List[Any] , do_resize : bool = True , size : Dict[str, int] = None , resample : PILImageResampling = PILImageResampling.BICUBIC , do_center_crop : bool = True , crop_size : Dict[str, int] = None , do_rescale : bool = True , rescale_factor : Union[int, float] = 1 / 255 , do_normalize : bool = True , image_mean : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , image_std : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **kwargs : Tuple , ) -> None:
        """simple docstring"""
        super().__init__(**kwargs )
        size = size if size is not None else {'shortest_edge': 224}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size , param_name='crop_size' )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def a__ ( self : Optional[Any] , image : np.ndarray , size : Dict[str, int] , resample : PILImageResampling = PILImageResampling.BICUBIC , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs : Tuple , ) -> np.ndarray:
        """simple docstring"""
        size_dict = get_size_dict(size , default_to_square=False )
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size['shortest_edge'] )
            output_size = get_resize_output_image_size(image , size=shortest_edge , default_to_square=False )
            size_dict = {'height': output_size[0], 'width': output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""" )
        return resize(
            image , size=(size_dict['height'], size_dict['width']) , resample=resample , data_format=data_format , **kwargs )
    def a__ ( self : Any , image : np.ndarray , size : Dict[str, int] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs : Any , ) -> np.ndarray:
        """simple docstring"""
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""" )
        return center_crop(image , size=(size['height'], size['width']) , data_format=data_format , **kwargs )
    def a__ ( self : Optional[Any] , image : np.ndarray , scale : Union[int, float] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs : Optional[int] , ) -> np.ndarray:
        """simple docstring"""
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def a__ ( self : List[str] , image : np.ndarray , mean : Union[float, List[float]] , std : Union[float, List[float]] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs : str , ) -> np.ndarray:
        """simple docstring"""
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def a__ ( self : Optional[int] , images : ImageInput , do_resize : Optional[bool] = None , size : Optional[Dict[str, int]] = None , resample : PILImageResampling = None , do_center_crop : Optional[bool] = None , crop_size : Optional[Dict[str, int]] = None , do_rescale : Optional[bool] = None , rescale_factor : Optional[float] = None , do_normalize : Optional[bool] = None , image_mean : Optional[Union[float, Iterable[float]]] = None , image_std : Optional[Union[float, Iterable[float]]] = None , return_tensors : Optional[TensorType] = None , data_format : ChannelDimension = ChannelDimension.FIRST , **kwargs : List[Any] , ) -> BatchFeature:
        """simple docstring"""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name='crop_size' )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.' )
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.' )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image , size , resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image , crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image , rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image , image_mean , image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
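# A minimal numpy sketch (not part of the original class) of the transform
# order `preprocess` applies: resize -> center crop -> rescale -> normalize.
# The resize step is omitted for brevity, and `center_crop_hw` below is a
# hypothetical stand-in for the transformers `image_transforms.center_crop`.
import numpy as np

image = np.random.randint(0, 256, (300, 400, 3), dtype=np.uint8)

def center_crop_hw(img, h, w):
    # crop the central (h, w) window of the image
    top = (img.shape[0] - h) // 2
    left = (img.shape[1] - w) // 2
    return img[top : top + h, left : left + w]

cropped = center_crop_hw(image, 224, 224)          # (224, 224, 3)
rescaled = cropped.astype(np.float32) * (1 / 255)  # do_rescale with rescale_factor=1/255
mean = np.array([0.485, 0.456, 0.406], dtype=np.float32)  # ImageNet statistics
std = np.array([0.229, 0.224, 0.225], dtype=np.float32)
normalized = (rescaled - mean) / std               # do_normalize
assert normalized.shape == (224, 224, 3)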
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config( model_name : str ):
    '''simple docstring'''
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768
    # set label information
    num_labels = 150
    repo_id = 'huggingface/label-files'
    filename = 'ade20k-id2label.json'
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    labelaid = {v: k for k, v in idalabel.items()}
    backbone_config = SwinConfig(
        embed_dim=embed_dim , depths=depths , num_heads=num_heads , window_size=window_size , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , )
    config = UperNetConfig(
        backbone_config=backbone_config , auxiliary_in_channels=auxiliary_in_channels , num_labels=num_labels , id2label=idalabel , label2id=labelaid , )
    return config
def create_rename_keys( config : Any ):
    '''simple docstring'''
    rename_keys = []
# fmt: off
# stem
rename_keys.append(('backbone.patch_embed.projection.weight', 'backbone.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.patch_embed.projection.bias', 'backbone.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.patch_embed.norm.weight', 'backbone.embeddings.norm.weight') )
rename_keys.append(('backbone.patch_embed.norm.bias', 'backbone.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm1.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm1.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm2.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm2.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((f"""backbone.stages.{i}.downsample.reduction.weight""", f"""backbone.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((f"""backbone.stages.{i}.downsample.norm.weight""", f"""backbone.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((f"""backbone.stages.{i}.downsample.norm.bias""", f"""backbone.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((f"""backbone.norm{i}.weight""", f"""backbone.hidden_states_norms.stage{i+1}.weight""") )
rename_keys.append((f"""backbone.norm{i}.bias""", f"""backbone.hidden_states_norms.stage{i+1}.bias""") )
# decode head
rename_keys.extend(
[
('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
] )
# fmt: on
return rename_keys
def rename_key( dct : dict , old : str , new : str ):
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
def read_in_q_k_v( state_dict : dict , backbone_config : Any ):
    '''simple docstring'''
    num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        dim = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight""" )
            in_proj_bias = state_dict.pop(f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias""" )
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"""] = in_proj_weight[:dim, :]
            state_dict[f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"""] = in_proj_bias[: dim]
            state_dict[f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"""] = in_proj_weight[
                dim : dim * 2, :
            ]
            state_dict[f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"""] = in_proj_bias[
                dim : dim * 2
            ]
            state_dict[f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"""] = in_proj_weight[
                -dim :, :
            ]
            state_dict[f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"""] = in_proj_bias[-dim :]
            # fmt: on
def correct_unfold_reduction_order( x : Any ):
    '''simple docstring'''
    out_channel , in_channel = x.shape
    x = x.reshape(out_channel , 4 , in_channel // 4 )
    x = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(out_channel , in_channel )
    return x
def reverse_correct_unfold_reduction_order( x : Any ):
    '''simple docstring'''
    out_channel , in_channel = x.shape
    x = x.reshape(out_channel , in_channel // 4 , 4 )
    x = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(out_channel , in_channel )
    return x
def correct_unfold_norm_order( x : Any ):
    '''simple docstring'''
    in_channel = x.shape[0]
    x = x.reshape(4 , in_channel // 4 )
    x = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(in_channel )
    return x
def reverse_correct_unfold_norm_order( x : Any ):
    '''simple docstring'''
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4 , 4 )
    x = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(in_channel )
    return x
def convert_upernet_checkpoint( model_name : str , pytorch_dump_folder_path : str , push_to_hub : bool ):
    '''simple docstring'''
    model_name_to_url = {
        'upernet-swin-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth',
        'upernet-swin-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth',
        'upernet-swin-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth',
        'upernet-swin-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth',
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='cpu' , file_name=model_name )[
        'state_dict'
    ]
    for name, param in state_dict.items():
        print(name , param.shape )
    config = get_upernet_config(model_name )
    model = UperNetForSemanticSegmentation(config )
    model.eval()
    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        if "bn" in key:
            key = key.replace('bn' , 'batch_norm' )
        state_dict[key] = val
    # rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config.backbone_config )
    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value )
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value )
    model.load_state_dict(state_dict )
    # verify on image
    url = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'
    image = Image.open(requests.get(url , stream=True ).raw ).convert('RGB' )
    processor = SegformerImageProcessor()
    pixel_values = processor(image , return_tensors='pt' ).pixel_values
    with torch.no_grad():
        outputs = model(pixel_values )
    logits = outputs.logits
    print(logits.shape )
    print('First values of logits:' , logits[0, 0, :3, :3] )
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] )
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]] )
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]] )
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]] )
    print('Logits:' , outputs.logits[0, 0, :3, :3] )
    assert torch.allclose(outputs.logits[0, 0, :3, :3] , expected_slice , atol=1e-4 )
    print('Looks ok!' )
    if pytorch_dump_folder_path is not None:
        print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
        model.save_pretrained(pytorch_dump_folder_path )
        print(f"""Saving processor to {pytorch_dump_folder_path}""" )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(f"""Pushing model and processor for {model_name} to hub""" )
        model.push_to_hub(f"""openmmlab/{model_name}""" )
        processor.push_to_hub(f"""openmmlab/{model_name}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="upernet-swin-tiny",
type=str,
choices=[F"""upernet-swin-{size}""" for size in ["tiny", "small", "base", "large"]],
help="Name of the Swin + UperNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
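# A minimal, self-contained sketch (not part of the conversion script) of the
# fused-QKV split performed by `read_in_q_k_v` above: the original checkpoint
# stores one (3*dim, dim) projection that is cut into equal thirds for the
# separate query, key and value weights.
import torch

dim = 8
in_proj_weight = torch.randn(3 * dim, dim)
query = in_proj_weight[:dim, :]
key = in_proj_weight[dim : dim * 2, :]
value = in_proj_weight[-dim:, :]
assert torch.equal(torch.cat([query, key, value]), in_proj_weight)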
import cva
import numpy as np
class HarrisCorner:
    '''simple docstring'''
    def __init__( self : int , k : float , window_size : int ) -> None:
        """simple docstring"""
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError('invalid k value' )
    def __str__( self : str ) -> str:
        """simple docstring"""
        return str(self.k )
    def detect( self : Any , img_path : str ) -> tuple[cva.Mat, list[list[int]]]:
        """simple docstring"""
        img = cva.imread(img_path , 0 )
        h , w = img.shape
        corner_list = []
        color_img = img.copy()
        color_img = cva.cvtColor(color_img , cva.COLOR_GRAY2RGB )
        dy , dx = np.gradient(img )
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset , h - offset ):
            for x in range(offset , w - offset ):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r] )
                    color_img.itemset((y, x, 0) , 0 )
                    color_img.itemset((y, x, 1) , 0 )
                    color_img.itemset((y, x, 2) , 255 )
        return color_img, corner_list
if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img , _ = edge_detect.detect("path_to_image")
    cva.imwrite("detect.png", color_img)
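# A minimal numpy sketch (not part of the original class) of the Harris corner
# response computed above: R = det(M) - k * trace(M)^2 from the windowed sums
# of the squared image gradients.
import numpy as np

toy = np.zeros((8, 8), dtype=np.float64)
toy[2:6, 2:6] = 1.0                       # a bright square produces corners
gy, gx = np.gradient(toy)
ixx, iyy, ixy = gx**2, gy**2, gx * gy
y, x, offset, k = 2, 2, 1, 0.04           # one 3x3 window around pixel (2, 2)
wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
response = (wxx * wyy - wxy**2) - k * (wxx + wyy) ** 2
print(f"Harris response at (2, 2): {response:.4f}")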
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class A( ProcessorMixin ):
    '''simple docstring'''
    feature_extractor_class = '''Wav2Vec2FeatureExtractor'''
    tokenizer_class = '''AutoTokenizer'''
    def __init__( self : Tuple , feature_extractor : Any , tokenizer : Any ) -> Any:
        """simple docstring"""
        super().__init__(feature_extractor , tokenizer )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
@classmethod
    def from_pretrained( cls : Optional[int] , pretrained_model_name_or_path : str , **kwargs : Optional[int] ) -> Union[str, Any]:
        """simple docstring"""
        try:
            return super().from_pretrained(pretrained_model_name_or_path , **kwargs )
        except OSError:
            warnings.warn(
                f"""Loading a tokenizer inside {cls.__name__} from a config that does not"""
                ' include a `tokenizer_class` attribute is deprecated and will be '
                'removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`'
                ' attribute to either your `config.json` or `tokenizer_config.json` '
                'file to suppress this warning: ' , FutureWarning , )
            feature_extractor = WavaVecaFeatureExtractor.from_pretrained(pretrained_model_name_or_path , **kwargs )
            tokenizer = WavaVecaCTCTokenizer.from_pretrained(pretrained_model_name_or_path , **kwargs )
            return cls(feature_extractor=feature_extractor , tokenizer=tokenizer )
    def __call__( self : Any , *args : str , **kwargs : Dict ) -> List[str]:
        """simple docstring"""
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )
        if "raw_speech" in kwargs:
            warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' )
            audio = kwargs.pop('raw_speech' )
        else:
            audio = kwargs.pop('audio' , None )
        sampling_rate = kwargs.pop('sampling_rate' , None )
        text = kwargs.pop('text' , None )
        if len(args ) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError('You need to specify either an `audio` or `text` input to process.' )
        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        if text is not None:
            encodings = self.tokenizer(text , **kwargs )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs['labels'] = encodings['input_ids']
            return inputs
    def pad( self : int , *args : Any , **kwargs : Tuple ) -> List[Any]:
        """simple docstring"""
        if self._in_target_context_manager:
            return self.current_processor.pad(*args , **kwargs )
        input_features = kwargs.pop('input_features' , None )
        labels = kwargs.pop('labels' , None )
        if len(args ) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features , *args , **kwargs )
        if labels is not None:
            labels = self.tokenizer.pad(labels , **kwargs )
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features['labels'] = labels['input_ids']
            return input_features
    def batch_decode( self : Union[str, Any] , *args : List[str] , **kwargs : str ) -> str:
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self : Optional[int] , *args : Tuple , **kwargs : str ) -> Union[str, Any]:
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )
@contextmanager
    def as_target_processor( self : Dict ) -> Union[str, Any]:
        """simple docstring"""
        warnings.warn(
            '`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
            'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
            'your audio inputs, or in a separate call.' )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
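# A hedged usage sketch (not part of the original module) of how a processor
# like the one above is typically driven through its `audio` / `text` keyword
# arguments. The checkpoint name is illustrative only, and loading it
# downloads weights from the Hub.
from transformers import Wav2Vec2Processor
import numpy as np

processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
speech = np.zeros(16000, dtype=np.float32)              # 1 second of silence
inputs = processor(audio=speech, sampling_rate=16000, return_tensors="pt")
labels = processor(text="HELLO WORLD").input_ids        # tokenized transcript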
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    '''simple docstring'''
    bs = (
        list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
def get_pairs( word : tuple ):
    '''simple docstring'''
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class A( PreTrainedTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    def __init__( self : Optional[Any] , vocab_file : str , merges_file : str , errors : Union[str, Any]="replace" , bos_token : Dict="<s>" , eos_token : Optional[int]="</s>" , sep_token : Optional[Any]="</s>" , cls_token : Dict="<s>" , unk_token : Dict="<unk>" , pad_token : Any="<pad>" , mask_token : Dict="<mask>" , add_prefix_space : Union[str, Any]=False , **kwargs : List[str] , ) -> Tuple:
        """simple docstring"""
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            errors=errors , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , **kwargs , )
        with open(vocab_file , encoding='utf-8' ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file , encoding='utf-8' ) as merges_handle:
            bpe_merges = merges_handle.read().split('\n' )[1:-1]
        bpe_merges = [tuple(merge.split() ) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges , range(len(bpe_merges ) ) ) )
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def a__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
return len(self.encoder )
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self : Tuple , token : str ) -> Optional[Any]:
        """simple docstring"""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float('inf' ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = ' '.join(word )
        self.cache[token] = word
        return word
    def _tokenize( self : str , text : str ) -> List[str]:
        """simple docstring"""
        bpe_tokens = []
        for token in re.findall(self.pat , text ):
            token = ''.join(
                self.byte_encoder[b] for b in token.encode('utf-8' ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(' ' ) )
        return bpe_tokens
def a__ ( self : Tuple , A_ : str ) -> Optional[Any]:
"""simple docstring"""
return self.encoder.get(A_ , self.encoder.get(self.unk_token ) )
def a__ ( self : Tuple , A_ : Dict ) -> List[Any]:
"""simple docstring"""
return self.decoder.get(A_ )
    def convert_tokens_to_string( self : Optional[int] , tokens : List[str] ) -> Union[str, Any]:
        """simple docstring"""
        text = ''.join(tokens )
        text = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
        return text
    def save_vocabulary( self : Tuple , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
        with open(vocab_file , 'w' , encoding='utf-8' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + '\n' )
        index = 0
        with open(merge_file , 'w' , encoding='utf-8' ) as writer:
            writer.write('#version: 0.2\n' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        ' Please check that the tokenizer is not corrupted!' )
                    index = token_index
                writer.write(' '.join(bpe_tokens ) + '\n' )
                index += 1
        return vocab_file, merge_file
    def get_special_tokens_mask( self : str , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False ) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self : int , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def prepare_for_tokenization( self : str , text : str , is_split_into_words : Union[str, Any]=False , **kwargs : List[str] ) -> List[Any]:
        """simple docstring"""
        add_prefix_space = kwargs.pop('add_prefix_space' , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = ' ' + text
        return (text, kwargs)
    def build_inputs_with_special_tokens( self : List[Any] , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        return token_ids_0 + [self.eos_token_id]
    def _build_conversation_input_ids( self : Optional[int] , conversation : "Conversation" ) -> List[int]:
        """simple docstring"""
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(' ' + text )
            else:
                # Generated responses should contain them already.
                inputs.append(text )
        full_string = ' '.join(inputs )
        input_ids = self.encode(full_string )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" )
        return input_ids
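# A minimal, self-contained sketch (not part of the tokenizer class) of the
# byte-to-unicode trick used above: every byte gets a printable unicode
# stand-in, so the BPE vocabulary never has to contain control characters or
# raw spaces, and any UTF-8 input can be round-tripped losslessly.
byte_encoder = bytes_to_unicode()
byte_decoder = {v: k for k, v in byte_encoder.items()}
encoded = ''.join(byte_encoder[b] for b in 'héllo'.encode('utf-8'))
decoded = bytearray(byte_decoder[c] for c in encoded).decode('utf-8')
assert decoded == 'héllo'
print(encoded)  # 'hÃ©llo' -- the two UTF-8 bytes of 'é' each get a stand-in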
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint( checkpoint_path : str , metadata_path : str , entity_vocab_path : str , pytorch_dump_folder_path : str , model_size : str ):
    '''simple docstring'''
    with open(metadata_path ) as metadata_file:
        metadata = json.load(metadata_file )
    config = LukeConfig(use_entity_aware_attention=True , **metadata['model_config'] )
    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path , map_location='cpu' )['module']
    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path )
    # add an entry for [MASK2]
    entity_vocab['[MASK2]'] = max(entity_vocab.values() ) + 1
    config.entity_vocab_size += 1
    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'] )
    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_a = AddedToken('<ent>' , lstrip=False , rstrip=False )
    entity_token_b = AddedToken('<ent2>' , lstrip=False , rstrip=False )
    tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_a, entity_token_b]} )
    config.vocab_size += 2
    print(f"""Saving tokenizer to {pytorch_dump_folder_path}""" )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
    with open(os.path.join(pytorch_dump_folder_path , 'tokenizer_config.json' ) , 'r' ) as f:
        tokenizer_config = json.load(f )
    tokenizer_config['tokenizer_class'] = 'MLukeTokenizer'
    with open(os.path.join(pytorch_dump_folder_path , 'tokenizer_config.json' ) , 'w' ) as f:
        json.dump(tokenizer_config , f )
    with open(os.path.join(pytorch_dump_folder_path , MLukeTokenizer.vocab_files_names['entity_vocab_file'] ) , 'w' ) as f:
        json.dump(entity_vocab , f )
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path )
    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(['@'] )[0]
    enta_init_index = tokenizer.convert_tokens_to_ids(['#'] )[0]
    word_emb = state_dict['embeddings.word_embeddings.weight']
    ent_emb = word_emb[ent_init_index].unsqueeze(0 )
    enta_emb = word_emb[enta_init_index].unsqueeze(0 )
    state_dict['embeddings.word_embeddings.weight'] = torch.cat([word_emb, ent_emb, enta_emb] )
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0 )
        enta_decoder_bias = decoder_bias[enta_init_index].unsqueeze(0 )
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers ):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"""encoder.layer.{layer_index}.attention.self."""
            state_dict[prefix + 'w2e_' + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + 'e2w_' + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + 'e2e_' + matrix_name] = state_dict[prefix + matrix_name]
    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict['entity_embeddings.entity_embeddings.weight']
    entity_mask_emb = entity_emb[entity_vocab['[MASK]']].unsqueeze(0 )
    state_dict['entity_embeddings.entity_embeddings.weight'] = torch.cat([entity_emb, entity_mask_emb] )
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict['entity_predictions.bias']
    entity_mask_bias = entity_prediction_bias[entity_vocab['[MASK]']].unsqueeze(0 )
    state_dict['entity_predictions.bias'] = torch.cat([entity_prediction_bias, entity_mask_bias] )
    model = LukeForMaskedLM(config=config ).eval()
    state_dict.pop('entity_predictions.decoder.weight' )
    state_dict.pop('lm_head.decoder.weight' )
    state_dict.pop('lm_head.decoder.bias' )
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith('lm_head' ) or key.startswith('entity_predictions' )):
            state_dict_for_hugging_face[f"""luke.{key}"""] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]
    state_dict = state_dict_for_hugging_face
    missing_keys , unexpected_keys = model.load_state_dict(state_dict , strict=False )
    if set(unexpected_keys ) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"""Unexpected unexpected_keys: {unexpected_keys}""" )
    if set(missing_keys ) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"""Unexpected missing_keys: {missing_keys}""" )
    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path , task='entity_classification' )
    text = 'ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'
    span = (0, 9)
    encoding = tokenizer(text , entity_spans=[span] , return_tensors='pt' )
    outputs = model(**encoding )
    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768) )
        expected_slice = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1e-4 ):
        raise ValueError
    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768) )
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
            f""" {expected_shape}""" )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , expected_slice , atol=1e-4 ):
        raise ValueError
    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path )
    text = 'Tokyo is the capital of <mask>.'
    span = (24, 30)
    encoding = tokenizer(text , entity_spans=[span] , return_tensors='pt' )
    outputs = model(**encoding )
    input_ids = encoding['input_ids'][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids('<mask>' ) )
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1 )
    assert "Japan" == tokenizer.decode(predicted_id )
    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith('en:' )][0] == "en:Japan"
    # Finally, save our PyTorch model and tokenizer
    print('Saving PyTorch model to {}'.format(pytorch_dump_folder_path ) )
    model.save_pretrained(pytorch_dump_folder_path )
def load_original_entity_vocab( entity_vocab_path : str ):
    '''simple docstring'''
    SPECIAL_TOKENS = ['[MASK]', '[PAD]', '[UNK]']
    data = [json.loads(line ) for line in open(entity_vocab_path )]
    new_mapping = {}
    for entry in data:
        entity_id = entry['id']
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_entity_name = f"""{language}:{entity_name}"""
            new_mapping[new_entity_name] = entity_id
    return new_mapping
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
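# A minimal, self-contained sketch (not part of the conversion script) of the
# embedding-extension pattern used above: new special-token rows are appended
# to an existing embedding matrix with `torch.cat`, initialized from rows that
# already carry a related meaning.
import torch

word_emb = torch.randn(10, 4)                  # a tiny (vocab, hidden) matrix
ent_row = word_emb[3].unsqueeze(0)             # reuse row 3 for a new token
ent2_row = word_emb[7].unsqueeze(0)            # reuse row 7 for another
extended = torch.cat([word_emb, ent_row, ent2_row])
assert extended.shape == (12, 4)
assert torch.equal(extended[10], word_emb[3])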
lowerCamelCase : Dict = "Alexander Joslin"
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm( equation : str ):
    '''simple docstring'''
    operators = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub}
    operand_stack = Stack()
    operator_stack = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i ) )
        elif i in operators:
            # RULE 2
            operator_stack.push(i )
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num_a = operand_stack.peek()
            operand_stack.pop()
            num_b = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num_b , num_a )
            operand_stack.push(total )
    # RULE 5
    return operand_stack.peek()
if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
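# A short worked trace (not part of the original module) of the expression
# above: the innermost parenthesized groups collapse first, exactly as the
# two stacks reduce them.
#   (4 * 2) -> 8, (2 + 3) -> 5, (8 * 5) -> 40, (5 + 40) -> 45
assert eval("(5 + ((4 * 2) * (2 + 3)))") == 45  # sanity check of the expected answer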
from math import sqrt
def sum_of_divisors( n : int ):
    '''simple docstring'''
    total = 0
    for i in range(1 , int(sqrt(n ) + 1 ) ):
        if n % i == 0 and i != sqrt(n ):
            total += i + n // i
        elif i == sqrt(n ):
            total += i
    return total - n
def solution( limit : int = 10000 ):
    '''simple docstring'''
    total = sum(
        i
        for i in range(1 , limit )
        if sum_of_divisors(sum_of_divisors(i ) ) == i and sum_of_divisors(i ) != i )
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
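# A quick, self-contained check (not part of the original solution) of the
# divisor-sum helper above: for a perfect number such as 28 the sum of proper
# divisors equals the number itself (1 + 2 + 4 + 7 + 14 = 28), and 220/284
# form the classic amicable pair the solution counts.
assert sum_of_divisors(28) == 28
assert sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220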
def print_max_activities( start : list[int] , finish : list[int] ):
    '''simple docstring'''
    n = len(finish )
    print('The following activities are selected:' )
    # The first activity is always selected
    i = 0
    print(i , end=',' )
    # Consider rest of the activities
    for j in range(n ):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j , end=',' )
            i = j
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
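# A hedged note (not part of the original module): the greedy selection above
# assumes the activities are already sorted by finish time. For unsorted
# input, sort both lists together first, as in this small helper.
def sorted_by_finish(start: list[int], finish: list[int]) -> tuple[list[int], list[int]]:
    # pair up, sort on finish time, and unzip again
    pairs = sorted(zip(start, finish), key=lambda p: p[1])
    return [s for s, _ in pairs], [f for _, f in pairs]

start_sorted, finish_sorted = sorted_by_finish([3, 1, 2], [4, 2, 5])
assert finish_sorted == [2, 4, 5]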
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
lowerCamelCase : Dict = {
"microsoft/trocr-base-handwritten": (
"https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class A( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''trocr'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''num_attention_heads''': '''decoder_attention_heads''',
        '''hidden_size''': '''d_model''',
        '''num_hidden_layers''': '''decoder_layers''',
    }
    def __init__( self : str , vocab_size : int=50265 , d_model : int=1024 , decoder_layers : int=12 , decoder_attention_heads : int=16 , decoder_ffn_dim : int=4096 , activation_function : str="gelu" , max_position_embeddings : int=512 , dropout : float=0.1 , attention_dropout : float=0.0 , activation_dropout : float=0.0 , decoder_start_token_id : int=2 , init_std : float=0.02 , decoder_layerdrop : float=0.0 , use_cache : bool=True , scale_embedding : bool=False , use_learned_position_embeddings : bool=True , layernorm_embedding : bool=True , pad_token_id : int=1 , bos_token_id : int=0 , eos_token_id : int=2 , **kwargs : str , ) -> int:
        """simple docstring"""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
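# A hedged usage sketch (not part of the configuration module): with the real
# `transformers.TrOCRConfig`, the attribute map above lets generic code read
# `hidden_size` or `num_hidden_layers` while the config stores `d_model` and
# `decoder_layers` under its own names.
from transformers import TrOCRConfig

config = TrOCRConfig(d_model=256, decoder_layers=4)
assert config.hidden_size == 256          # aliased through attribute_map
assert config.num_hidden_layers == 4      # aliased to decoder_layers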
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
'''simple docstring'''
    def __init__( self : Optional[Any] , parent : Union[str, Any] , batch_size : int=13 , image_size : int=32 , patch_size : int=2 , num_channels : int=3 , embed_dim : int=16 , hidden_sizes : List[str]=[32, 64, 128] , depths : Optional[Any]=[1, 2, 1] , num_heads : Tuple=[2, 2, 4] , window_size : int=2 , mlp_ratio : float=2.0 , qkv_bias : bool=True , hidden_dropout_prob : float=0.0 , attention_probs_dropout_prob : float=0.0 , drop_path_rate : float=0.1 , hidden_act : str="gelu" , use_absolute_embeddings : bool=False , patch_norm : bool=True , initializer_range : float=0.02 , layer_norm_eps : float=1E-5 , is_training : bool=True , scope : Optional[int]=None , use_labels : bool=True , type_sequence_label_size : int=10 , encoder_stride : int=8 , out_features : List[str]=["stage1", "stage2"] , out_indices : List[int]=[1, 2] , ) -> List[str]:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs( self : List[str] ) -> List[Any]:
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self : List[Any] ) -> Any:
"""simple docstring"""
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
    def create_and_check_model( self : Union[str, Any] , config : Dict , pixel_values : Any , labels : Optional[int] ) -> List[str]:
        """simple docstring"""
        model = FocalNetModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
    def create_and_check_backbone( self : Tuple , config : List[str] , pixel_values : Optional[int] , labels : Optional[Any] ) -> Dict:
        """simple docstring"""
        model = FocalNetBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
    def create_and_check_for_masked_image_modeling( self : int , config : Optional[Any] , pixel_values : Optional[int] , labels : Any ) -> Any:
        """simple docstring"""
        model = FocalNetForMaskedImageModeling(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
    def create_and_check_for_image_classification( self : Tuple , config : List[Any] , pixel_values : int , labels : Dict ) -> Union[str, Any]:
        """simple docstring"""
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common( self : int ) -> Optional[Any]:
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class A( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {'''feature-extraction''': FocalNetModel, '''image-classification''': FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self : List[Any] ) -> Dict:
        """simple docstring"""
        self.model_tester = FocalNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=FocalNetConfig , embed_dim=37 , has_text_modality=False )
def a__ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self : Any ) -> Optional[int]:
        """simple docstring"""
        return
def a__ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def a__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs )
def a__ ( self : Dict ) -> int:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs )
def a__ ( self : List[str] ) -> Any:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@unittest.skip(reason='FocalNet does not use inputs_embeds' )
def a__ ( self : int ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason='FocalNet does not use feedforward chunking' )
def a__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
pass
    def a__ ( self : Union[str, Any] ) -> Dict:
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes[:-1]:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def a__ ( self : Any ) -> Optional[int]:
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes[:-1]:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def check_hidden_states_output( self : int , inputs_dict : List[Any] , config : int , model_class : Dict , image_size : Dict ) -> List[Any]:
        """simple docstring"""
        model = model_class(config )
        model.to(torch_device )
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(hidden_states ) , expected_num_layers )
        # FocalNet has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states ) , expected_num_layers )
        batch_size , num_channels , height , width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size , num_channels , height * width ).permute(0 , 2 , 1 )
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def a__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
lowerCamelCase_ = True
self.check_hidden_states_output(A_ , A_ , A_ , A_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase_ = True
self.check_hidden_states_output(A_ , A_ , A_ , A_ )
def a__ ( self : List[str] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = 3
lowerCamelCase_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowerCamelCase_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCamelCase_ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowerCamelCase_ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
lowerCamelCase_ = True
self.check_hidden_states_output(A_ , A_ , A_ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase_ = True
self.check_hidden_states_output(A_ , A_ , A_ , (padded_height, padded_width) )
@slow
def a__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = FocalNetModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def a__ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = _config_zero_init(A_ )
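        # With a zero-initializer config, freshly initialized weights should be exactly 0.0 or 1.0; any other value suggests a parameter that ignores the configured initializer.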
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(config=A_ )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class A( unittest.TestCase ):
'''simple docstring'''
@cached_property
def a__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
return AutoImageProcessor.from_pretrained('microsoft/focalnet-tiny' ) if is_vision_available() else None
@slow
def a__ ( self : Tuple ) -> Any:
"""simple docstring"""
lowerCamelCase_ = FocalNetForImageClassification.from_pretrained('microsoft/focalnet-tiny' ).to(A_ )
lowerCamelCase_ = self.default_image_processor
lowerCamelCase_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
lowerCamelCase_ = image_processor(images=A_ , return_tensors='pt' ).to(A_ )
# forward pass
with torch.no_grad():
lowerCamelCase_ = model(**A_ )
# verify the logits
lowerCamelCase_ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , A_ )
lowerCamelCase_ = torch.tensor([0.2166, -0.4368, 0.2191] ).to(A_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A_ , atol=1E-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class A( UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (FocalNetBackbone,) if is_torch_available() else ()
UpperCamelCase = FocalNetConfig
UpperCamelCase = False
def a__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = FocalNetModelTester(self )
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : int = logging.get_logger(__name__)
lowerCamelCase : Optional[Any] = {
"asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = '''sew-d'''
def __init__( self : int , A_ : Optional[int]=32 , A_ : int=768 , A_ : List[str]=12 , A_ : str=12 , A_ : int=3072 , A_ : Tuple=2 , A_ : str=512 , A_ : List[str]=256 , A_ : Any=True , A_ : List[Any]=True , A_ : str=("p2c", "c2p") , A_ : Dict="layer_norm" , A_ : List[str]="gelu_python" , A_ : str=0.1 , A_ : Optional[int]=0.1 , A_ : Optional[Any]=0.1 , A_ : Tuple=0.0 , A_ : Tuple=0.1 , A_ : Union[str, Any]=0.02 , A_ : Dict=1E-7 , A_ : Union[str, Any]=1E-5 , A_ : Any="group" , A_ : Union[str, Any]="gelu" , A_ : Optional[int]=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , A_ : Dict=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , A_ : List[Any]=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , A_ : Any=False , A_ : List[str]=128 , A_ : Tuple=16 , A_ : int=True , A_ : Tuple=0.05 , A_ : Any=10 , A_ : int=2 , A_ : Any=0.0 , A_ : int=10 , A_ : List[str]=0 , A_ : Optional[int]="mean" , A_ : str=False , A_ : List[Any]=False , A_ : Union[str, Any]=256 , A_ : Optional[Any]=0 , A_ : List[Any]=1 , A_ : int=2 , **A_ : Union[str, Any] , ) -> int:
"""simple docstring"""
super().__init__(**A_ , pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ )
lowerCamelCase_ = hidden_size
lowerCamelCase_ = feat_extract_norm
lowerCamelCase_ = feat_extract_activation
lowerCamelCase_ = list(A_ )
lowerCamelCase_ = list(A_ )
lowerCamelCase_ = list(A_ )
lowerCamelCase_ = conv_bias
lowerCamelCase_ = num_conv_pos_embeddings
lowerCamelCase_ = num_conv_pos_embedding_groups
lowerCamelCase_ = len(self.conv_dim )
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = squeeze_factor
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = position_buckets
lowerCamelCase_ = share_att_key
lowerCamelCase_ = relative_attention
lowerCamelCase_ = norm_rel_ebd
lowerCamelCase_ = list(A_ )
lowerCamelCase_ = hidden_act
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = hidden_dropout
lowerCamelCase_ = attention_dropout
lowerCamelCase_ = activation_dropout
lowerCamelCase_ = feat_proj_dropout
lowerCamelCase_ = final_dropout
lowerCamelCase_ = layer_norm_eps
lowerCamelCase_ = feature_layer_norm_eps
lowerCamelCase_ = initializer_range
lowerCamelCase_ = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
                'Configuration for convolutional layers is incorrect. '
                'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, '
f"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"""
f"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCamelCase_ = apply_spec_augment
lowerCamelCase_ = mask_time_prob
lowerCamelCase_ = mask_time_length
lowerCamelCase_ = mask_time_min_masks
lowerCamelCase_ = mask_feature_prob
lowerCamelCase_ = mask_feature_length
lowerCamelCase_ = mask_feature_min_masks
# ctc loss
lowerCamelCase_ = ctc_loss_reduction
lowerCamelCase_ = ctc_zero_infinity
# sequence classification
lowerCamelCase_ = use_weighted_layer_sum
lowerCamelCase_ = classifier_proj_size
@property
def a__ ( self : str ) -> Dict:
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class A( unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = MODEL_FOR_CAUSAL_LM_MAPPING
UpperCamelCase = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def a__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = pipeline(task='text-generation' , model='sshleifer/tiny-ctrl' , framework='pt' )
# Using `do_sample=False` to force deterministic output
lowerCamelCase_ = text_generator('This is a test' , do_sample=A_ )
self.assertEqual(
A_ , [
{
'generated_text': (
'This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'
' oscope. FiliFili@@'
)
}
] , )
lowerCamelCase_ = text_generator(['This is a test', 'This is a second test'] )
self.assertEqual(
A_ , [
[
{
'generated_text': (
'This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'
' oscope. FiliFili@@'
)
}
],
[
{
'generated_text': (
'This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'
' oscope. oscope. FiliFili@@'
)
}
],
] , )
lowerCamelCase_ = text_generator('This is a test' , do_sample=A_ , num_return_sequences=2 , return_tensors=A_ )
self.assertEqual(
A_ , [
{'generated_token_ids': ANY(A_ )},
{'generated_token_ids': ANY(A_ )},
] , )
lowerCamelCase_ = text_generator.model.config.eos_token_id
lowerCamelCase_ = '<pad>'
lowerCamelCase_ = text_generator(
['This is a test', 'This is a second test'] , do_sample=A_ , num_return_sequences=2 , batch_size=2 , return_tensors=A_ , )
self.assertEqual(
A_ , [
[
{'generated_token_ids': ANY(A_ )},
{'generated_token_ids': ANY(A_ )},
],
[
{'generated_token_ids': ANY(A_ )},
{'generated_token_ids': ANY(A_ )},
],
] , )
@require_tf
def a__ ( self : Optional[int] ) -> str:
"""simple docstring"""
lowerCamelCase_ = pipeline(task='text-generation' , model='sshleifer/tiny-ctrl' , framework='tf' )
# Using `do_sample=False` to force deterministic output
lowerCamelCase_ = text_generator('This is a test' , do_sample=A_ )
self.assertEqual(
A_ , [
{
'generated_text': (
'This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'
' please,'
)
}
] , )
lowerCamelCase_ = text_generator(['This is a test', 'This is a second test'] , do_sample=A_ )
self.assertEqual(
A_ , [
[
{
'generated_text': (
'This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'
' please,'
)
}
],
[
{
'generated_text': (
'This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'
' Cannes 閲閲Cannes Cannes Cannes 攵 please,'
)
}
],
] , )
def a__ ( self : Optional[int] , A_ : Dict , A_ : int , A_ : List[str] ) -> str:
"""simple docstring"""
lowerCamelCase_ = TextGenerationPipeline(model=A_ , tokenizer=A_ )
return text_generator, ["This is a test", "Another test"]
def a__ ( self : Dict ) -> str:
"""simple docstring"""
lowerCamelCase_ = 'Hello I believe in'
lowerCamelCase_ = pipeline('text-generation' , model='hf-internal-testing/tiny-random-gpt2' )
lowerCamelCase_ = text_generator(A_ )
self.assertEqual(
A_ , [{'generated_text': 'Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'}] , )
lowerCamelCase_ = text_generator(A_ , stop_sequence=' fe' )
self.assertEqual(A_ , [{'generated_text': 'Hello I believe in fe'}] )
def a__ ( self : Any , A_ : Optional[Any] , A_ : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = text_generator.model
lowerCamelCase_ = text_generator.tokenizer
lowerCamelCase_ = text_generator('This is a test' )
self.assertEqual(A_ , [{'generated_text': ANY(A_ )}] )
self.assertTrue(outputs[0]['generated_text'].startswith('This is a test' ) )
lowerCamelCase_ = text_generator('This is a test' , return_full_text=A_ )
self.assertEqual(A_ , [{'generated_text': ANY(A_ )}] )
self.assertNotIn('This is a test' , outputs[0]['generated_text'] )
lowerCamelCase_ = pipeline(task='text-generation' , model=A_ , tokenizer=A_ , return_full_text=A_ )
lowerCamelCase_ = text_generator('This is a test' )
self.assertEqual(A_ , [{'generated_text': ANY(A_ )}] )
self.assertNotIn('This is a test' , outputs[0]['generated_text'] )
lowerCamelCase_ = text_generator('This is a test' , return_full_text=A_ )
self.assertEqual(A_ , [{'generated_text': ANY(A_ )}] )
self.assertTrue(outputs[0]['generated_text'].startswith('This is a test' ) )
lowerCamelCase_ = text_generator(['This is great !', 'Something else'] , num_return_sequences=2 , do_sample=A_ )
self.assertEqual(
A_ , [
[{'generated_text': ANY(A_ )}, {'generated_text': ANY(A_ )}],
[{'generated_text': ANY(A_ )}, {'generated_text': ANY(A_ )}],
] , )
if text_generator.tokenizer.pad_token is not None:
lowerCamelCase_ = text_generator(
['This is great !', 'Something else'] , num_return_sequences=2 , batch_size=2 , do_sample=A_ )
self.assertEqual(
A_ , [
[{'generated_text': ANY(A_ )}, {'generated_text': ANY(A_ )}],
[{'generated_text': ANY(A_ )}, {'generated_text': ANY(A_ )}],
] , )
with self.assertRaises(A_ ):
lowerCamelCase_ = text_generator('test' , return_full_text=A_ , return_text=A_ )
with self.assertRaises(A_ ):
lowerCamelCase_ = text_generator('test' , return_full_text=A_ , return_tensors=A_ )
with self.assertRaises(A_ ):
lowerCamelCase_ = text_generator('test' , return_text=A_ , return_tensors=A_ )
        # Empty prompt is slightly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
lowerCamelCase_ = text_generator('' )
self.assertEqual(A_ , [{'generated_text': ANY(A_ )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
lowerCamelCase_ = text_generator('' )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
lowerCamelCase_ = ['RwkvForCausalLM', 'XGLMForCausalLM', 'GPTNeoXForCausalLM']
if (
tokenizer.model_max_length < 10000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator('This is a test' * 500 , max_new_tokens=20 )
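            # The 'hole' strategy keeps only the most recent prompt tokens so that max_new_tokens still fits in the model's context window.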
lowerCamelCase_ = text_generator('This is a test' * 500 , handle_long_generation='hole' , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(A_ ):
text_generator(
'This is a test' * 500 , handle_long_generation='hole' , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def a__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
import torch
# Classic `model_kwargs`
lowerCamelCase_ = pipeline(
            model='hf-internal-testing/tiny-random-bloom' , model_kwargs={'device_map': 'auto', 'torch_dtype': torch.bfloat16} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloat16 )
lowerCamelCase_ = pipe('This is a test' )
self.assertEqual(
A_ , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
        # Upgraded those two to real pipeline arguments (they just get sent to the model as they're unlikely to mean anything else.)
        lowerCamelCase_ = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' , torch_dtype=torch.bfloat16 )
self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloat16 )
lowerCamelCase_ = pipe('This is a test' )
self.assertEqual(
A_ , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
lowerCamelCase_ = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' )
self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.float32 )
lowerCamelCase_ = pipe('This is a test' )
self.assertEqual(
A_ , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
@require_torch
@require_torch_gpu
def a__ ( self : int ) -> str:
"""simple docstring"""
import torch
        lowerCamelCase_ = pipeline(model='hf-internal-testing/tiny-random-bloom' , device=0 , torch_dtype=torch.float16 )
pipe('This is a test' )
@require_torch
@require_accelerate
@require_torch_gpu
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
import torch
        lowerCamelCase_ = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' , torch_dtype=torch.float16 )
pipe('This is a test' , do_sample=A_ , top_p=0.5 )
def a__ ( self : Tuple ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = 'Hello world'
lowerCamelCase_ = pipeline('text-generation' , model='hf-internal-testing/tiny-random-gpt2' )
if text_generator.model.framework == "tf":
lowerCamelCase_ = logging.get_logger('transformers.generation.tf_utils' )
else:
lowerCamelCase_ = logging.get_logger('transformers.generation.utils' )
        lowerCamelCase_ = 'Both `max_new_tokens`' # The beginning of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(A_ ) as cl:
lowerCamelCase_ = text_generator(A_ , max_length=10 , max_new_tokens=1 )
self.assertIn(A_ , cl.out )
# The user only sets one -> no warning
with CaptureLogger(A_ ) as cl:
lowerCamelCase_ = text_generator(A_ , max_new_tokens=1 )
self.assertNotIn(A_ , cl.out )
with CaptureLogger(A_ ) as cl:
lowerCamelCase_ = text_generator(A_ , max_length=10 )
self.assertNotIn(A_ , cl.out )
from math import isqrt
def _SCREAMING_SNAKE_CASE ( lowercase : int ):
'''simple docstring'''
return all(number % divisor != 0 for divisor in range(2 , isqrt(lowercase ) + 1 ) )
def _SCREAMING_SNAKE_CASE ( lowercase : int = 10**6 ):
'''simple docstring'''
lowerCamelCase_ = 0
lowerCamelCase_ = 1
lowerCamelCase_ = 7
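    # Candidates are differences of consecutive cubes, (n + 1)**3 - n**3 = 3n^2 + 3n + 1; each step adds 6 * cube_index to reach the next difference.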
while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate )
cube_index += 1
prime_candidate += 6 * cube_index
return primes_count
if __name__ == "__main__":
print(F"""{solution() = }""")
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
lowerCamelCase : List[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
lowerCamelCase : Tuple = " \"\"\"\n Output class for the scheduler's step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n"
class A( unittest.TestCase ):
'''simple docstring'''
def a__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , 'schedulers/' ) )
lowerCamelCase_ = self.diffusers_dir
shutil.copy(
os.path.join(A_ , 'src/diffusers/schedulers/scheduling_ddpm.py' ) , os.path.join(self.diffusers_dir , 'schedulers/scheduling_ddpm.py' ) , )
def a__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = 'src/diffusers'
shutil.rmtree(self.diffusers_dir )
def a__ ( self : str , A_ : Optional[Any] , A_ : Optional[int] , A_ : str , A_ : Optional[Any]=None ) -> int:
"""simple docstring"""
lowerCamelCase_ = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
lowerCamelCase_ = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
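        # Format the candidate through black first so the copy check compares normalized code rather than incidental whitespace.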
        lowerCamelCase_ = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=119 )
lowerCamelCase_ = black.format_str(A_ , mode=A_ )
lowerCamelCase_ = os.path.join(self.diffusers_dir , 'new_code.py' )
with open(A_ , 'w' , newline='\n' ) as f:
f.write(A_ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(A_ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=A_ )
with open(A_ , 'r' ) as f:
                self.assertEqual(f.read() , A_ )
def a__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = check_copies.find_code_in_diffusers('schedulers.scheduling_ddpm.DDPMSchedulerOutput' )
self.assertEqual(A_ , A_ )
def a__ ( self : Any ) -> Dict:
"""simple docstring"""
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , REFERENCE_CODE + '\n' , )
# With no empty line at the end
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , A_ , )
# Copy consistency with rename
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , re.sub('DDPM' , 'Test' , A_ ) , )
# Copy consistency with a really long name
lowerCamelCase_ = 'TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'
self.check_copy_consistency(
f"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , f"""{long_class_name}SchedulerOutput""" , re.sub('Bert' , A_ , A_ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , A_ , overwrite_result=re.sub('DDPM' , 'Test' , A_ ) , )
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
lowerCamelCase : List[Any] = logging.get_logger(__name__)
lowerCamelCase : Any = {
"Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
"Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
"Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
"Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
"Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
"Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
"Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
"Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
"Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
"Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
"Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = '''codegen'''
UpperCamelCase = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self : Optional[Any] , A_ : List[str]=50400 , A_ : str=2048 , A_ : int=2048 , A_ : Union[str, Any]=4096 , A_ : str=28 , A_ : Optional[Any]=16 , A_ : Dict=64 , A_ : int=None , A_ : Optional[Any]="gelu_new" , A_ : List[Any]=0.0 , A_ : Dict=0.0 , A_ : Dict=0.0 , A_ : Dict=1E-5 , A_ : Union[str, Any]=0.02 , A_ : Optional[Any]=True , A_ : Union[str, Any]=50256 , A_ : Optional[int]=50256 , A_ : Dict=False , **A_ : Union[str, Any] , ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = vocab_size
lowerCamelCase_ = n_ctx
lowerCamelCase_ = n_positions
lowerCamelCase_ = n_embd
lowerCamelCase_ = n_layer
lowerCamelCase_ = n_head
lowerCamelCase_ = n_inner
lowerCamelCase_ = rotary_dim
lowerCamelCase_ = activation_function
lowerCamelCase_ = resid_pdrop
lowerCamelCase_ = embd_pdrop
lowerCamelCase_ = attn_pdrop
lowerCamelCase_ = layer_norm_epsilon
lowerCamelCase_ = initializer_range
lowerCamelCase_ = use_cache
lowerCamelCase_ = bos_token_id
lowerCamelCase_ = eos_token_id
super().__init__(
bos_token_id=A_ , eos_token_id=A_ , tie_word_embeddings=A_ , **A_ )
class A( UpperCamelCase ):
'''simple docstring'''
def __init__( self : int , A_ : PretrainedConfig , A_ : str = "default" , A_ : List[PatchingSpec] = None , A_ : bool = False , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(A_ , task=A_ , patching_specs=A_ , use_past=A_ )
if not getattr(self._config , 'pad_token_id' , A_ ):
# TODO: how to do that better?
lowerCamelCase_ = 0
@property
def a__ ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
lowerCamelCase_ = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
if self.use_past:
self.fill_with_past_key_values_(A_ , direction='inputs' )
lowerCamelCase_ = {0: 'batch', 1: 'past_sequence + sequence'}
else:
lowerCamelCase_ = {0: 'batch', 1: 'sequence'}
return common_inputs
@property
def a__ ( self : str ) -> int:
"""simple docstring"""
return self._config.n_layer
@property
def a__ ( self : List[str] ) -> int:
"""simple docstring"""
return self._config.n_head
def a__ ( self : Union[str, Any] , A_ : PreTrainedTokenizer , A_ : int = -1 , A_ : int = -1 , A_ : bool = False , A_ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
"""simple docstring"""
lowerCamelCase_ = super(A_ , self ).generate_dummy_inputs(
A_ , batch_size=A_ , seq_length=A_ , is_pair=A_ , framework=A_ )
        # We need to order the inputs in the way they appear in the forward()
lowerCamelCase_ = OrderedDict({'input_ids': common_inputs['input_ids']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
lowerCamelCase_ , lowerCamelCase_ = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
lowerCamelCase_ = seqlen + 2
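                # One zero-filled (key, value) pair per layer, each of shape (batch, num_heads, past_seq_len, head_dim).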
lowerCamelCase_ = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
lowerCamelCase_ = [
(torch.zeros(A_ ), torch.zeros(A_ )) for _ in range(self.num_layers )
]
lowerCamelCase_ = common_inputs['attention_mask']
if self.use_past:
lowerCamelCase_ = ordered_inputs['attention_mask'].dtype
lowerCamelCase_ = torch.cat(
[ordered_inputs['attention_mask'], torch.ones(A_ , A_ , dtype=A_ )] , dim=1 )
return ordered_inputs
@property
def a__ ( self : List[Any] ) -> int:
"""simple docstring"""
return 13
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class A( unittest.TestCase ):
'''simple docstring'''
def a__ ( self : Optional[int] , A_ : Tuple , A_ : str , A_ : int ) -> Any:
"""simple docstring"""
self.assertEqual(len(A_ ) , len(A_ ) )
for a, b in zip(A_ , A_ ):
self.assertAlmostEqual(A_ , A_ , delta=A_ )
def a__ ( self : int ) -> str:
"""simple docstring"""
lowerCamelCase_ = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0] )] )
accumulator([tf.constant([-2.0, 1.0] )] )
accumulator([tf.constant([-1.0, 2.0] )] )
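        # Accumulating a different number of gradient tensors than previous steps must raise.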
with self.assertRaises(A_ ):
accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
self.assertEqual(accumulator.step , 3 )
self.assertEqual(len(accumulator.gradients ) , 1 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1E-2 )
def a__ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = None
ops.enable_eager_execution_internal()
lowerCamelCase_ = tf.config.list_physical_devices('CPU' )
if len(A_ ) == 1:
tf.config.set_logical_device_configuration(
physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
lowerCamelCase_ = tf.config.list_logical_devices(device_type='CPU' )
lowerCamelCase_ = tf.distribute.MirroredStrategy(devices=devices[:2] )
with strategy.scope():
lowerCamelCase_ = GradientAccumulator()
lowerCamelCase_ = tf.Variable([4.0, 3.0] )
lowerCamelCase_ , lowerCamelCase_ = create_optimizer(5E-5 , 10 , 5 )
lowerCamelCase_ = tf.Variable([0.0, 0.0] , trainable=A_ )
def accumulate_on_replica(A_ : Any ):
accumulator([gradient] )
def apply_on_replica():
optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )
@tf.function
def accumulate(A_ : List[Any] , A_ : Tuple ):
with strategy.scope():
lowerCamelCase_ = strategy.experimental_local_results(A_ )
local_variables[0].assign(A_ )
local_variables[1].assign(A_ )
strategy.run(A_ , args=(gradient_placeholder,) )
@tf.function
def apply_grad():
with strategy.scope():
strategy.run(A_ )
def _check_local_values(A_ : List[Any] , A_ : str ):
lowerCamelCase_ = strategy.experimental_local_results(accumulator._gradients[0] )
self.assertListAlmostEqual(values[0].value() , A_ , tol=1E-2 )
self.assertListAlmostEqual(values[1].value() , A_ , tol=1E-2 )
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] )
import enum
import os
from hashlib import sha256
from typing import Optional
from .. import config
from .logging import get_logger
lowerCamelCase : Optional[int] = get_logger(__name__)
class A( enum.Enum ):
'''simple docstring'''
UpperCamelCase = '''all_checks'''
UpperCamelCase = '''basic_checks'''
UpperCamelCase = '''no_checks'''
class A( UpperCamelCase ):
'''simple docstring'''
class A( UpperCamelCase ):
'''simple docstring'''
class A( UpperCamelCase ):
'''simple docstring'''
class A( UpperCamelCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( lowercase : Optional[dict] , lowercase : dict , lowercase : Union[str, Any]=None ):
'''simple docstring'''
if expected_checksums is None:
logger.info('Unable to verify checksums.' )
return
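    # Missing files raise ExpectedMoreDownloadedFiles and extra files raise UnexpectedDownloadedFile, before any checksum comparison.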
if len(set(lowercase ) - set(lowercase ) ) > 0:
raise ExpectedMoreDownloadedFiles(str(set(lowercase ) - set(lowercase ) ) )
if len(set(lowercase ) - set(lowercase ) ) > 0:
raise UnexpectedDownloadedFile(str(set(lowercase ) - set(lowercase ) ) )
lowerCamelCase_ = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
lowerCamelCase_ = ' for ' + verification_name if verification_name is not None else ''
if len(lowercase ) > 0:
raise NonMatchingChecksumError(
f"""Checksums didn't match{for_verification_name}:\n"""
f"""{bad_urls}\n"""
'Set `verification_mode=\'no_checks\'` to skip checksums verification and ignore this error' )
logger.info('All the checksums matched successfully' + for_verification_name )
class A( UpperCamelCase ):
'''simple docstring'''
class A( UpperCamelCase ):
'''simple docstring'''
class A( UpperCamelCase ):
'''simple docstring'''
class A( UpperCamelCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( lowercase : Optional[dict] , lowercase : dict ):
'''simple docstring'''
if expected_splits is None:
logger.info('Unable to verify splits sizes.' )
return
if len(set(lowercase ) - set(lowercase ) ) > 0:
raise ExpectedMoreSplits(str(set(lowercase ) - set(lowercase ) ) )
if len(set(lowercase ) - set(lowercase ) ) > 0:
raise UnexpectedSplits(str(set(lowercase ) - set(lowercase ) ) )
lowerCamelCase_ = [
{'expected': expected_splits[name], 'recorded': recorded_splits[name]}
for name in expected_splits
if expected_splits[name].num_examples != recorded_splits[name].num_examples
]
if len(lowercase ) > 0:
raise NonMatchingSplitsSizesError(str(lowercase ) )
logger.info('All the splits matched successfully.' )
def _SCREAMING_SNAKE_CASE ( lowercase : str , lowercase : bool = True ):
'''simple docstring'''
if record_checksum:
        lowerCamelCase_ = sha256()
with open(lowercase , 'rb' ) as f:
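            # Stream the file in 1 MiB chunks so large files never have to fit in memory.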
for chunk in iter(lambda: f.read(1 << 20 ) , b'' ):
                m.update(chunk )
lowerCamelCase_ = m.hexdigest()
else:
lowerCamelCase_ = None
return {"num_bytes": os.path.getsize(lowercase ), "checksum": checksum}
def _SCREAMING_SNAKE_CASE ( lowercase : Optional[Any] ):
'''simple docstring'''
if dataset_size and config.IN_MEMORY_MAX_SIZE:
return dataset_size < config.IN_MEMORY_MAX_SIZE
else:
return False
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
lowerCamelCase : str = imread(r"digital_image_processing/image_data/lena_small.jpg")
lowerCamelCase : Optional[Any] = cvtColor(img, COLOR_BGR2GRAY)
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = cn.convert_to_negative(lowercase )
# assert negative_img array for at least one True
assert negative_img.any()
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
with Image.open('digital_image_processing/image_data/lena_small.jpg' ) as img:
# Work around assertion for response
assert str(cc.change_contrast(lowercase , 1_10 ) ).startswith(
'<PIL.Image.Image image mode=RGB size=100x100 at' )
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = imread('digital_image_processing/image_data/lena_small.jpg' , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
lowerCamelCase_ = canny.canny(lowercase )
# assert canny array for at least one True
assert canny_array.any()
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
assert gg.gaussian_filter(lowercase , 5 , sigma=0.9 ).all()
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
    lowerCamelCase_ = conv.img_convolve(lowercase , lowercase ).astype(uint8 )
assert res.any()
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
assert med.median_filter(lowercase , 3 ).any()
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ , lowerCamelCase_ = sob.sobel_filter(lowercase )
assert grad.any() and theta.any()
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = sp.make_sepia(lowercase , 20 )
assert sepia.all()
def _SCREAMING_SNAKE_CASE ( lowercase : str = "digital_image_processing/image_data/lena_small.jpg" ):
'''simple docstring'''
lowerCamelCase_ = bs.Burkes(imread(lowercase , 1 ) , 1_20 )
burkes.process()
assert burkes.output_img.any()
def _SCREAMING_SNAKE_CASE ( lowercase : str = "digital_image_processing/image_data/lena_small.jpg" , ):
'''simple docstring'''
lowerCamelCase_ = rs.NearestNeighbour(imread(lowercase , 1 ) , 4_00 , 2_00 )
nn.process()
assert nn.output.any()
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = 'digital_image_processing/image_data/lena.jpg'
# Reading the image and converting it to grayscale.
lowerCamelCase_ = imread(lowercase , 0 )
# Test for get_neighbors_pixel function() return not None
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = image[x_coordinate][y_coordinate]
lowerCamelCase_ = lbp.get_neighbors_pixel(
lowercase , lowercase , lowercase , lowercase )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
lowerCamelCase_ = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
lowerCamelCase_ = lbp.local_binary_value(lowercase , lowercase , lowercase )
assert lbp_image.any()
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class A:
'''simple docstring'''
UpperCamelCase = LEDConfig
UpperCamelCase = {}
UpperCamelCase = '''gelu'''
def __init__( self : List[Any] , A_ : int , A_ : Tuple=13 , A_ : Optional[int]=7 , A_ : Any=True , A_ : Optional[int]=False , A_ : Tuple=99 , A_ : Any=32 , A_ : List[Any]=2 , A_ : int=4 , A_ : Tuple=37 , A_ : List[Any]=0.1 , A_ : Dict=0.1 , A_ : List[Any]=20 , A_ : Optional[Any]=2 , A_ : Union[str, Any]=1 , A_ : Dict=0 , A_ : Any=4 , ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = seq_length
lowerCamelCase_ = is_training
lowerCamelCase_ = use_labels
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = eos_token_id
lowerCamelCase_ = pad_token_id
lowerCamelCase_ = bos_token_id
lowerCamelCase_ = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
lowerCamelCase_ = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
lowerCamelCase_ = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def a__ ( self : int ) -> Any:
"""simple docstring"""
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
lowerCamelCase_ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
lowerCamelCase_ = tf.concat([input_ids, eos_tensor] , axis=1 )
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
lowerCamelCase_ = prepare_led_inputs_dict(A_ , A_ , A_ )
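        # Put global attention on the final token only; all other positions use LED's local windowed attention.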
lowerCamelCase_ = tf.concat(
[tf.zeros_like(A_ )[:, :-1], tf.ones_like(A_ )[:, -1:]] , axis=-1 , )
lowerCamelCase_ = global_attention_mask
return config, inputs_dict
def a__ ( self : List[Any] , A_ : Union[str, Any] , A_ : str ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = TFLEDModel(config=A_ ).get_decoder()
lowerCamelCase_ = inputs_dict['input_ids']
lowerCamelCase_ = input_ids[:1, :]
lowerCamelCase_ = inputs_dict['attention_mask'][:1, :]
lowerCamelCase_ = 1
# first forward pass
lowerCamelCase_ = model(A_ , attention_mask=A_ , use_cache=A_ )
lowerCamelCase_ , lowerCamelCase_ = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
lowerCamelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
        lowerCamelCase_ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
# append to next input_ids and
lowerCamelCase_ = tf.concat([input_ids, next_tokens] , axis=-1 )
lowerCamelCase_ = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
lowerCamelCase_ = model(A_ , attention_mask=A_ )[0]
lowerCamelCase_ = model(A_ , attention_mask=A_ , past_key_values=A_ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
lowerCamelCase_ = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
lowerCamelCase_ = output_from_no_past[:, -3:, random_slice_idx]
lowerCamelCase_ = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(A_ , A_ , rtol=1E-3 )
def _SCREAMING_SNAKE_CASE ( lowercase : str , lowercase : Optional[Any] , lowercase : Optional[int] , lowercase : Dict=None , lowercase : Union[str, Any]=None , lowercase : List[Any]=None , lowercase : Dict=None , ):
'''simple docstring'''
if attention_mask is None:
        lowerCamelCase_ = tf.cast(tf.math.not_equal(lowercase , config.pad_token_id ) , tf.int8 )
if decoder_attention_mask is None:
lowerCamelCase_ = tf.concat(
[
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
] , axis=-1 , )
if head_mask is None:
lowerCamelCase_ = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
lowerCamelCase_ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class A( UpperCamelCase , UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
UpperCamelCase = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
UpperCamelCase = (
{
'''conversational''': TFLEDForConditionalGeneration,
'''feature-extraction''': TFLEDModel,
'''summarization''': TFLEDForConditionalGeneration,
'''text2text-generation''': TFLEDForConditionalGeneration,
'''translation''': TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
UpperCamelCase = True
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def a__ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ = TFLEDModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=A_ )
def a__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def a__ ( self : List[str] ) -> Any:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*A_ )
def a__ ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = tf.zeros_like(inputs_dict['attention_mask'] )
lowerCamelCase_ = 2
lowerCamelCase_ = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['global_attention_mask'] , )
lowerCamelCase_ = True
lowerCamelCase_ = self.model_tester.seq_length
lowerCamelCase_ = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(A_ : Optional[int] ):
lowerCamelCase_ = outputs.decoder_attentions
self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(A_ : Any ):
lowerCamelCase_ = [t.numpy() for t in outputs.encoder_attentions]
lowerCamelCase_ = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
lowerCamelCase_ = True
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = model_class(A_ )
lowerCamelCase_ = model(self._prepare_for_class(A_ , A_ ) )
lowerCamelCase_ = len(A_ )
self.assertEqual(config.output_hidden_states , A_ )
check_encoder_attentions_output(A_ )
if self.is_encoder_decoder:
lowerCamelCase_ = model_class(A_ )
lowerCamelCase_ = model(self._prepare_for_class(A_ , A_ ) )
self.assertEqual(config.output_hidden_states , A_ )
check_decoder_attentions_output(A_ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
lowerCamelCase_ = True
lowerCamelCase_ = model_class(A_ )
lowerCamelCase_ = model(self._prepare_for_class(A_ , A_ ) )
self.assertEqual(config.output_hidden_states , A_ )
check_encoder_attentions_output(A_ )
# Check attention is always last and order is fine
lowerCamelCase_ = True
lowerCamelCase_ = True
lowerCamelCase_ = model_class(A_ )
lowerCamelCase_ = model(self._prepare_for_class(A_ , A_ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(A_ ) )
self.assertEqual(model.config.output_hidden_states , A_ )
check_encoder_attentions_output(A_ )
@unittest.skip('LED keeps using potentially symbolic tensors in conditionals and breaks tracing.' )
def a__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
pass
def a__ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
pass
def _SCREAMING_SNAKE_CASE ( lowercase : int ):
'''simple docstring'''
    return tf.constant(lowercase , dtype=tf.int32 )
lowerCamelCase : int = 1e-4
@slow
@require_tf
class A( unittest.TestCase ):
'''simple docstring'''
def a__ ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' ).led
# change to intended input here
lowerCamelCase_ = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
lowerCamelCase_ = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
lowerCamelCase_ = prepare_led_inputs_dict(model.config , A_ , A_ )
lowerCamelCase_ = model(**A_ )[0]
lowerCamelCase_ = (1, 1024, 768)
self.assertEqual(output.shape , A_ )
# change to expected output here
lowerCamelCase_ = tf.convert_to_tensor(
[[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]] , )
tf.debugging.assert_near(output[:, :3, :3] , A_ , atol=1E-3 )
def a__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' )
# change to intended input here
lowerCamelCase_ = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
lowerCamelCase_ = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
lowerCamelCase_ = prepare_led_inputs_dict(model.config , A_ , A_ )
lowerCamelCase_ = model(**A_ )[0]
lowerCamelCase_ = (1, 1024, model.config.vocab_size)
self.assertEqual(output.shape , A_ )
# change to expected output here
lowerCamelCase_ = tf.convert_to_tensor(
[[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]] , )
tf.debugging.assert_near(output[:, :3, :3] , A_ , atol=1E-3 , rtol=1E-3 )
class A:
'''simple docstring'''
def __init__( self : Dict ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = {}
def a__ ( self : Union[str, Any] , A_ : List[Any] ) -> int:
"""simple docstring"""
if vertex not in self.adjacency:
lowerCamelCase_ = {}
self.num_vertices += 1
def a__ ( self : int , A_ : int , A_ : Optional[Any] , A_ : List[str] ) -> Tuple:
"""simple docstring"""
self.add_vertex(A_ )
self.add_vertex(A_ )
if head == tail:
return
lowerCamelCase_ = weight
lowerCamelCase_ = weight
def a__ ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = self.get_edges()
for edge in edges:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = edge
edges.remove((tail, head, weight) )
for i in range(len(A_ ) ):
lowerCamelCase_ = list(edges[i] )
        edges.sort(key=lambda e : e[2] )
for i in range(len(A_ ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
lowerCamelCase_ = edges[i][2] + 1
for edge in edges:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = edge
lowerCamelCase_ = weight
lowerCamelCase_ = weight
def __str__( self : str ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = ''
for tail in self.adjacency:
for head in self.adjacency[tail]:
lowerCamelCase_ = self.adjacency[head][tail]
string += f"""{head} -> {tail} == {weight}\n"""
return string.rstrip('\n' )
def a__ ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def a__ ( self : List[str] ) -> int:
"""simple docstring"""
return self.adjacency.keys()
@staticmethod
def a__ ( A_ : Optional[Any]=None , A_ : List[str]=None ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = Graph()
if vertices is None:
lowerCamelCase_ = []
if edges is None:
lowerCamelCase_ = []
for vertex in vertices:
g.add_vertex(A_ )
for edge in edges:
g.add_edge(*A_ )
return g
class A:
'''simple docstring'''
def __init__( self : Optional[int] ) -> int:
"""simple docstring"""
lowerCamelCase_ = {}
lowerCamelCase_ = {}
def __len__( self : Any ) -> List[str]:
"""simple docstring"""
return len(self.parent )
def a__ ( self : List[str] , A_ : Any ) -> Dict:
"""simple docstring"""
if item in self.parent:
return self.find(A_ )
lowerCamelCase_ = item
lowerCamelCase_ = 0
return item
def a__ ( self : List[str] , A_ : Tuple ) -> Optional[int]:
"""simple docstring"""
if item not in self.parent:
return self.make_set(A_ )
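        # Path compression: re-point this item directly at the root found by the recursive call.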
if item != self.parent[item]:
lowerCamelCase_ = self.find(self.parent[item] )
return self.parent[item]
def a__ ( self : Any , A_ : int , A_ : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = self.find(A_ )
lowerCamelCase_ = self.find(A_ )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
lowerCamelCase_ = roota
return roota
if self.rank[roota] < self.rank[roota]:
lowerCamelCase_ = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
lowerCamelCase_ = roota
return roota
return None
@staticmethod
def a__ ( A_ : int ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = graph.num_vertices
lowerCamelCase_ = Graph.UnionFind()
lowerCamelCase_ = []
while num_components > 1:
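            # One Boruvka round: record the cheapest edge leaving each component, add those edges to the MST, and merge the components.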
lowerCamelCase_ = {}
for vertex in graph.get_vertices():
lowerCamelCase_ = -1
lowerCamelCase_ = graph.get_edges()
for edge in edges:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = edge
edges.remove((tail, head, weight) )
for edge in edges:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = edge
lowerCamelCase_ = union_find.find(A_ )
lowerCamelCase_ = union_find.find(A_ )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
lowerCamelCase_ = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
lowerCamelCase_ = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = cheap_edge[vertex]
if union_find.find(A_ ) != union_find.find(A_ ):
union_find.union(A_ , A_ )
mst_edges.append(cheap_edge[vertex] )
lowerCamelCase_ = num_components - 1
lowerCamelCase_ = Graph.build(edges=A_ )
return mst
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class A( unittest.TestCase ):
'''simple docstring'''
def a__ ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'的',
'价',
'格',
'是',
'15',
'便',
'alex',
'##andra',
',',
'。',
'-',
't',
'shirt',
]
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
lowerCamelCase_ = {
'do_resize': True,
'size': {'height': 224, 'width': 224},
'do_center_crop': True,
'crop_size': {'height': 18, 'width': 18},
'do_normalize': True,
'image_mean': [0.48145466, 0.4578275, 0.40821073],
'image_std': [0.26862954, 0.26130258, 0.27577711],
'do_convert_rgb': True,
}
lowerCamelCase_ = os.path.join(self.tmpdirname , A_ )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(A_ , A_ )
def a__ ( self : Dict , **A_ : List[str] ) -> Any:
"""simple docstring"""
return BertTokenizer.from_pretrained(self.tmpdirname , **A_ )
def a__ ( self : Union[str, Any] , **A_ : Optional[Any] ) -> List[str]:
"""simple docstring"""
return BertTokenizerFast.from_pretrained(self.tmpdirname , **A_ )
def a__ ( self : Optional[int] , **A_ : Optional[Any] ) -> List[Any]:
"""simple docstring"""
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **A_ )
def a__ ( self : int ) -> Optional[Any]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def a__ ( self : str ) -> List[Any]:
"""simple docstring"""
        lowerCamelCase_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
lowerCamelCase_ = [Image.fromarray(np.moveaxis(A_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = self.get_rust_tokenizer()
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = ChineseCLIPProcessor(tokenizer=A_ , image_processor=A_ )
processor_slow.save_pretrained(self.tmpdirname )
lowerCamelCase_ = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=A_ )
lowerCamelCase_ = ChineseCLIPProcessor(tokenizer=A_ , image_processor=A_ )
processor_fast.save_pretrained(self.tmpdirname )
lowerCamelCase_ = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , A_ )
self.assertIsInstance(processor_fast.tokenizer , A_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , A_ )
self.assertIsInstance(processor_fast.image_processor , A_ )
def a__ ( self : Optional[Any] ) -> Any:
"""simple docstring"""
lowerCamelCase_ = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase_ = self.get_tokenizer(cls_token='(CLS)' , sep_token='(SEP)' )
lowerCamelCase_ = self.get_image_processor(do_normalize=A_ )
lowerCamelCase_ = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token='(CLS)' , sep_token='(SEP)' , do_normalize=A_ )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , A_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , A_ )
def a__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = ChineseCLIPProcessor(tokenizer=A_ , image_processor=A_ )
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = image_processor(A_ , return_tensors='np' )
lowerCamelCase_ = processor(images=A_ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def a__ ( self : List[str] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = ChineseCLIPProcessor(tokenizer=A_ , image_processor=A_ )
lowerCamelCase_ = 'Alexandra,T-shirt的价格是15便士。'
lowerCamelCase_ = processor(text=A_ )
lowerCamelCase_ = tokenizer(A_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def a__ ( self : str ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = ChineseCLIPProcessor(tokenizer=A_ , image_processor=A_ )
lowerCamelCase_ = 'Alexandra,T-shirt的价格是15便士。'
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = processor(text=A_ , images=A_ )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(A_ ):
processor()
def a__ ( self : int ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = ChineseCLIPProcessor(tokenizer=A_ , image_processor=A_ )
lowerCamelCase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase_ = processor.batch_decode(A_ )
lowerCamelCase_ = tokenizer.batch_decode(A_ )
self.assertListEqual(A_ , A_ )
def a__ ( self : Optional[int] ) -> str:
"""simple docstring"""
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = ChineseCLIPProcessor(tokenizer=A_ , image_processor=A_ )
lowerCamelCase_ = 'Alexandra,T-shirt的价格是15便士。'
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = processor(text=A_ , images=A_ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
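# Hedged usage sketch, outside the test suite above: a ChineseCLIPProcessor
# saved by the fixtures can be reloaded and applied in a single call (the
# function name and example text are ours, for illustration only).
def run_processor_once(pretrained_dir):
    from PIL import Image as PILImage
    processor = ChineseCLIPProcessor.from_pretrained(pretrained_dir)
    image = PILImage.new('RGB', (32, 32))
    # One call tokenizes the text and preprocesses the image together.
    return processor(text='T-shirt的价格是15便士。', images=image, return_tensors='np')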
def solution() -> str:
    """Project Euler 48: find the last ten digits of 1**1 + 2**2 + ... + 1000**1000."""
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]
if __name__ == "__main__":
print(solution())
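# Hedged alternative sketch: only the last ten digits matter, so the sum can
# be accumulated modulo 10**10 with three-argument pow(), avoiding the huge
# intermediate integers (the function name is ours, not Project Euler's).
# It returns the same digits as solution() above.
def solution_mod() -> str:
    modulus = 10**10
    total = sum(pow(i, i, modulus) for i in range(1, 1001)) % modulus
    return str(total).zfill(10)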
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
@dataclass
class A:
'''simple docstring'''
UpperCamelCase = True
UpperCamelCase = None
# Automatically constructed
UpperCamelCase = "PIL.Image.Image"
UpperCamelCase = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} )
UpperCamelCase = field(default='''Image''' , init=UpperCamelCase , repr=UpperCamelCase )
def __call__( self : str ) -> Optional[int]:
"""simple docstring"""
return self.pa_type
def a__ ( self : Optional[Any] , A_ : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ) -> dict:
"""simple docstring"""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support encoding images, please install \'Pillow\'.' )
if isinstance(A_ , A_ ):
lowerCamelCase_ = np.array(A_ )
if isinstance(A_ , A_ ):
return {"path": value, "bytes": None}
elif isinstance(A_ , A_ ):
return {"path": None, "bytes": value}
elif isinstance(A_ , np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(A_ )
elif isinstance(A_ , PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(A_ )
elif value.get('path' ) is not None and os.path.isfile(value['path'] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get('path' )}
elif value.get('bytes' ) is not None or value.get('path' ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get('bytes' ), "path": value.get('path' )}
else:
raise ValueError(
f"""An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" )
def a__ ( self : Optional[int] , A_ : dict , A_ : Dict=None ) -> "PIL.Image.Image":
"""simple docstring"""
if not self.decode:
raise RuntimeError('Decoding is disabled for this feature. Please use Image(decode=True) instead.' )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support decoding images, please install \'Pillow\'.' )
if token_per_repo_id is None:
lowerCamelCase_ = {}
lowerCamelCase_ , lowerCamelCase_ = value['path'], value['bytes']
if bytes_ is None:
if path is None:
raise ValueError(f"""An image should have one of 'path' or 'bytes' but both are None in {value}.""" )
else:
if is_local_path(A_ ):
lowerCamelCase_ = PIL.Image.open(A_ )
else:
lowerCamelCase_ = path.split('::' )[-1]
try:
lowerCamelCase_ = string_to_dict(A_ , config.HUB_DATASETS_URL )['repo_id']
lowerCamelCase_ = token_per_repo_id.get(A_ )
except ValueError:
lowerCamelCase_ = None
with xopen(A_ , 'rb' , use_auth_token=A_ ) as f:
lowerCamelCase_ = BytesIO(f.read() )
lowerCamelCase_ = PIL.Image.open(bytes_ )
else:
lowerCamelCase_ = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def a__ ( self : int ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""simple docstring"""
from .features import Value
return (
self
if self.decode
else {
"bytes": Value('binary' ),
"path": Value('string' ),
}
)
def a__ ( self : Optional[int] , A_ : Union[pa.StringArray, pa.StructArray, pa.ListArray] ) -> pa.StructArray:
"""simple docstring"""
if pa.types.is_string(storage.type ):
lowerCamelCase_ = pa.array([None] * len(A_ ) , type=pa.binary() )
lowerCamelCase_ = pa.StructArray.from_arrays([bytes_array, storage] , ['bytes', 'path'] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
lowerCamelCase_ = pa.array([None] * len(A_ ) , type=pa.string() )
lowerCamelCase_ = pa.StructArray.from_arrays([storage, path_array] , ['bytes', 'path'] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index('bytes' ) >= 0:
lowerCamelCase_ = storage.field('bytes' )
else:
lowerCamelCase_ = pa.array([None] * len(A_ ) , type=pa.binary() )
if storage.type.get_field_index('path' ) >= 0:
lowerCamelCase_ = storage.field('path' )
else:
lowerCamelCase_ = pa.array([None] * len(A_ ) , type=pa.string() )
lowerCamelCase_ = pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
lowerCamelCase_ = pa.array(
[encode_np_array(np.array(A_ ) )['bytes'] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
lowerCamelCase_ = pa.array([None] * len(A_ ) , type=pa.string() )
lowerCamelCase_ = pa.StructArray.from_arrays(
[bytes_array, path_array] , ['bytes', 'path'] , mask=bytes_array.is_null() )
return array_cast(A_ , self.pa_type )
def a__ ( self : str , A_ : pa.StructArray ) -> pa.StructArray:
"""simple docstring"""
@no_op_if_value_is_null
def path_to_bytes(A_ : List[Any] ):
with xopen(A_ , 'rb' ) as f:
lowerCamelCase_ = f.read()
return bytes_
lowerCamelCase_ = pa.array(
[
(path_to_bytes(x['path'] ) if x['bytes'] is None else x['bytes']) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
lowerCamelCase_ = pa.array(
[os.path.basename(A_ ) if path is not None else None for path in storage.field('path' ).to_pylist()] , type=pa.string() , )
lowerCamelCase_ = pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=bytes_array.is_null() )
return array_cast(A_ , self.pa_type )
def list_image_compression_formats() -> List[str]:
    """Return the image formats that Pillow can both open and save."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('To support encoding images, please install \'Pillow\'.')
    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS
def _SCREAMING_SNAKE_CASE ( lowercase : "PIL.Image.Image" ):
'''simple docstring'''
lowerCamelCase_ = BytesIO()
if image.format in list_image_compression_formats():
lowerCamelCase_ = image.format
else:
lowerCamelCase_ = 'PNG' if image.mode in ['1', 'L', 'LA', 'RGB', 'RGBA'] else 'TIFF'
image.save(lowercase , format=lowercase )
return buffer.getvalue()
def _SCREAMING_SNAKE_CASE ( lowercase : "PIL.Image.Image" ):
'''simple docstring'''
if hasattr(lowercase , 'filename' ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(lowercase )}
def encode_np_array(array: np.ndarray) -> dict:
    """Encode a NumPy array as image bytes, downcasting to a Pillow-compatible dtype first."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('To support encoding images, please install \'Pillow\'.')
    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != '=' else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize
    dest_dtype = None
    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype('|u1')
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.")
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype = np.dtype(dtype_byteorder + dtype_kind + str(dtype_itemsize))
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
    if dest_dtype is None:
        raise TypeError(
            f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}")
    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}
def objects_to_list_of_image_dicts(
    objs: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]]
) -> List[Optional[dict]]:
    """Encode a list of heterogeneous image objects into a list of {path, bytes} dicts."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('To support encoding images, please install \'Pillow\'.')
    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
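# Hedged self-check for the module-level helpers above (assumes Pillow and
# NumPy are installed; run manually, this is an illustration and not part of
# the library surface):
def _demo_image_encoding():
    import PIL.Image
    arr = np.arange(16, dtype=np.uint8).reshape(4, 4)
    encoded = encode_np_array(arr)  # grayscale uint8 array -> PNG bytes
    assert encoded['path'] is None and isinstance(encoded['bytes'], bytes)
    roundtripped = PIL.Image.open(BytesIO(encoded['bytes']))
    assert roundtripped.size == (4, 4)
    # A PIL image without a filename is serialized to bytes as well:
    assert encode_pil_image(PIL.Image.new('RGB', (2, 2)))['path'] is None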
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase : Dict = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Optional[int] = ["ViTFeatureExtractor"]
lowerCamelCase : Dict = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit"] = [
"VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTForImageClassification",
"ViTForMaskedImageModeling",
"ViTModel",
"ViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit"] = [
"TFViTForImageClassification",
"TFViTModel",
"TFViTPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vit"] = [
"FlaxViTForImageClassification",
"FlaxViTModel",
"FlaxViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
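# Hedged sketch of the lazy-import pattern used above, reduced to its core
# (class and variable names here are ours, for illustration): attribute
# access on the module proxy triggers the real submodule import, so merely
# importing the package stays cheap.
import importlib
import types


class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, attr):
        submodule = importlib.import_module('.' + self._class_to_module[attr], self.__name__)
        return getattr(submodule, attr)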
import numpy as np
from transformers import Pipeline
def softmax(outputs):
    """Numerically stable softmax over the last axis."""
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
class PairClassificationPipeline(Pipeline):
    """A custom pipeline that classifies a pair of texts in one forward pass."""

    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)
        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)
lowerCamelCase : Optional[Any] = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n"
lowerCamelCase : Tuple = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n"
lowerCamelCase : Optional[Any] = "\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting 'keep_singletons=False', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n 'mentions': mentions\n 'muc': MUC metric [Vilain et al, 1995]\n 'bcub': B-cubed [Bagga and Baldwin, 1998]\n 'ceafe': CEAFe [Luo et al., 2005]\n 'lea': LEA [Moosavi and Strube, 2016]\n 'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric('coval')\n >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',\n ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',\n ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',\n ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',\n ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',\n ... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n"
def get_coref_infos(key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"):
    """Build the per-document coreference structures consumed by the CoVal scorers."""
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}
    key_singletons_num = 0
    sys_singletons_num = 0
    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num
    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)
    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num
    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)
    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters
    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)
    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
'Number of removed nested coreferring mentions in the key '
f"""annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}""" )
logger.info(
'Number of resulting singleton clusters in the key '
f"""annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}""" )
if not keep_singletons:
logger.info(
f"""{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system """
'files, respectively' )
return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    """Score system output against the key with every requested CoVal metric."""
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)
    output_scores = {}
    conll = 0
    conll_subparts_num = 0
    for name, metric in metrics:
        recall, precision, fa = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({f"""{name}/recall""": recall, f"""{name}/precision""": precision, f"""{name}/f1""": fa} )
logger.info(
name.ljust(10 ) , f"""Recall: {recall * 1_00:.2f}""" , f""" Precision: {precision * 1_00:.2f}""" , f""" F1: {fa * 1_00:.2f}""" , )
if conll_subparts_num == 3:
        conll = (conll / 3) * 100
logger.info(f"""CoNLL score: {conll:.2f}""" )
output_scores.update({'conll_score': conll} )
return output_scores
def check_gold_parse_annotation(key_lines):
    """Return True if the key file carries gold parse information (column 6)."""
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith('#'):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A( datasets.Metric ):
'''simple docstring'''
def a__ ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' ) ),
'references': datasets.Sequence(datasets.Value('string' ) ),
} ) , codebase_urls=['https://github.com/ns-moosavi/coval'] , reference_urls=[
'https://github.com/ns-moosavi/coval',
'https://www.aclweb.org/anthology/P16-1060',
'http://www.conll.cemantix.org/2012/data.html',
] , )
def a__ ( self : List[str] , A_ : Optional[Any] , A_ : Optional[int] , A_ : int=True , A_ : str=False , A_ : int=False , A_ : Union[str, Any]=False ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = [
('mentions', evaluator.mentions),
('muc', evaluator.muc),
('bcub', evaluator.b_cubed),
('ceafe', evaluator.ceafe),
('lea', evaluator.lea),
]
if min_span:
lowerCamelCase_ = util.check_gold_parse_annotation(A_ )
if not has_gold_parse:
raise NotImplementedError('References should have gold parse annotation to use \'min_span\'.' )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
lowerCamelCase_ = evaluate(
key_lines=A_ , sys_lines=A_ , metrics=A_ , NP_only=A_ , remove_nested=A_ , keep_singletons=A_ , min_span=A_ , )
return score
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] for the incoming sample x[n]."""
        return 0.0
def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    """Pick display bounds that always include at least the +/-20 dB band."""
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest
def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot the frequency response of a filter from its impulse response."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel('Frequency (Hz)')
    plt.xscale('log')
    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel('Gain (dB)')
    plt.plot(fft_db)
    plt.show()
def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot the phase response of a filter from its impulse response."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.angle(np.fft.fft(outputs))
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel('Frequency (Hz)')
    plt.xscale('log')
    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel('Phase shift (Radians)')
    plt.plot(np.unwrap(fft_out, -2 * pi))
    plt.show()
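# Hedged usage sketch: any object with a float-in/float-out `process` method
# satisfies the FilterType protocol. The toy first-order low-pass below is
# our own illustration, not part of the original module.
class SimpleLowPass:
    def __init__(self, alpha: float = 0.1) -> None:
        self.alpha = alpha
        self.prev = 0.0

    def process(self, sample: float) -> float:
        # y[n] = y[n-1] + alpha * (x[n] - y[n-1])
        self.prev = self.prev + self.alpha * (sample - self.prev)
        return self.prev


if __name__ == "__main__":
    show_frequency_response(SimpleLowPass(), 48000)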
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default='language-modeling', metadata={'include_in_asdict_even_if_is_default': True})
    input_schema: ClassVar[Features] = Features({'text': Value('string')})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = 'text'

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: 'text'}
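# Hedged usage sketch (the column name is illustrative): the template maps a
# dataset's own text column onto the canonical "text" feature name.
# LanguageModeling(text_column='content').column_mapping  # -> {'content': 'text'}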
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name):
    """Replace the block number in `key` with its offset-adjusted value and swap in the new layer name."""
    to_find = original_name.split('.')[0]
    key_list = key.split('.')
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset
    key = key.replace(f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}")
    return key
def rename_keys(state_dict):
    """Rename the original PoolFormer state-dict keys to the HF naming scheme."""
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith('network'):
            key = key.replace('network', 'poolformer.encoder')
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith('bias') and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find('proj')]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace('proj', 'projection')
            if key.endswith('bias'):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = 'poolformer.encoder.' + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'mlp.fc1', 'output.conv1')
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'mlp.fc2', 'output.conv2')
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'norm1', 'before_norm')
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'norm2', 'after_norm')
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'layer_scale_1', 'layer_scale_1')
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'layer_scale_2', 'layer_scale_2')
        if "head" in key:
            key = key.replace('head', 'classifier')
        new_state_dict[key] = value
    return new_state_dict
def prepare_img():
    """Download an example COCO image to verify the conversion on."""
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    """Copy/paste/tweak the original PoolFormer weights into the HF structure."""
    config = PoolFormerConfig()
    # set attributes based on model_name
    repo_id = 'huggingface/label-files'
    size = model_name[-3:]
    config.num_labels = 1000
    filename = 'imagenet-1k-id2label.json'
    expected_shape = (1, 1000)
    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")
    # load image processor and prepare the verification image
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors='pt').pixel_values
    logger.info(f"Converting model {model_name}...")
    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device('cpu'))
    # rename keys
    state_dict = rename_keys(state_dict)
    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()
    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits
    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")
    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)
    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="poolformer_s12",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
    args = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
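# Hedged invocation sketch (the script filename and paths are placeholders):
# python convert_poolformer_original_to_pytorch.py \
#     --model_name poolformer_s12 \
#     --checkpoint_path /path/to/poolformer_s12.pth.tar \
#     --pytorch_dump_folder_path /path/to/dump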
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig):
    model_type = 'new-model'


if is_tf_available():

    class TFNewModel(TFBertModel):
        config_class = NewModelConfig
@require_tf
class A( unittest.TestCase ):
'''simple docstring'''
@slow
def a__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = 'bert-base-cased'
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = 'bert-base-cased'
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForPreTraining.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForCausalLM.from_pretrained(A_ )
lowerCamelCase_ , lowerCamelCase_ = TFAutoModelForCausalLM.from_pretrained(A_ , output_loading_info=A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelWithLMHead.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : int ) -> str:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForMaskedLM.from_pretrained(A_ )
lowerCamelCase_ , lowerCamelCase_ = TFAutoModelForMaskedLM.from_pretrained(A_ , output_loading_info=A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : Any ) -> List[Any]:
"""simple docstring"""
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained(A_ )
lowerCamelCase_ , lowerCamelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained(A_ , output_loading_info=A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForSequenceClassification.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : List[Any] ) -> Any:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForQuestionAnswering.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
@require_tensorflow_probability
def a__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForTableQuestionAnswering.from_pretrained(A_ )
lowerCamelCase_ , lowerCamelCase_ = TFAutoModelForTableQuestionAnswering.from_pretrained(
A_ , output_loading_info=A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
def a__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = TFAutoModelWithLMHead.from_pretrained(A_ )
self.assertIsInstance(A_ , A_ )
self.assertEqual(model.num_parameters() , 14410 )
self.assertEqual(model.num_parameters(only_trainable=A_ ) , 14410 )
def a__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = TFAutoModelWithLMHead.from_pretrained(A_ )
self.assertIsInstance(A_ , A_ )
self.assertEqual(model.num_parameters() , 14410 )
self.assertEqual(model.num_parameters(only_trainable=A_ ) , 14410 )
def a__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ = TFAutoModel.from_pretrained('sgugger/funnel-random-tiny' )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = copy.deepcopy(model.config )
lowerCamelCase_ = ['FunnelBaseModel']
lowerCamelCase_ = TFAutoModel.from_config(A_ )
self.assertIsInstance(A_ , A_ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(A_ )
lowerCamelCase_ = TFAutoModel.from_pretrained(A_ )
self.assertIsInstance(A_ , A_ )
def a__ ( self : Any ) -> Tuple:
"""simple docstring"""
try:
AutoConfig.register('new-model' , A_ )
lowerCamelCase_ = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(A_ ):
auto_class.register(A_ , A_ )
auto_class.register(A_ , A_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(A_ ):
auto_class.register(A_ , A_ )
# Now that the config is registered, it can be used as any other config with the auto-API
lowerCamelCase_ = BertModelTester(self ).get_config()
lowerCamelCase_ = NewModelConfig(**tiny_config.to_dict() )
lowerCamelCase_ = auto_class.from_config(A_ )
self.assertIsInstance(A_ , A_ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(A_ )
lowerCamelCase_ = auto_class.from_pretrained(A_ )
self.assertIsInstance(A_ , A_ )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def a__ ( self : int ) -> int:
"""simple docstring"""
with self.assertRaisesRegex(
A_ , 'bert-base is not a local folder and is not a valid model identifier' ):
lowerCamelCase_ = TFAutoModel.from_pretrained('bert-base' )
def a__ ( self : Any ) -> Dict:
"""simple docstring"""
with self.assertRaisesRegex(
A_ , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
lowerCamelCase_ = TFAutoModel.from_pretrained(A_ , revision='aaaaaa' )
def a__ ( self : str ) -> int:
"""simple docstring"""
with self.assertRaisesRegex(
A_ , 'hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin' , ):
lowerCamelCase_ = TFAutoModel.from_pretrained('hf-internal-testing/config-no-model' )
def a__ ( self : Any ) -> List[Any]:
"""simple docstring"""
with self.assertRaisesRegex(A_ , 'Use `from_pt=True` to load this model' ):
lowerCamelCase_ = TFAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only' )
def a__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' )
with RequestCounter() as counter:
lowerCamelCase_ = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
lowerCamelCase_ = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' )
with RequestCounter() as counter:
lowerCamelCase_ = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class A( unittest.TestCase ):
'''simple docstring'''
@property
def a__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def a__ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
options = ort.SessionOptions()
options.enable_mem_pattern = False
return options
def a__ ( self : Dict ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo.png' )
lowerCamelCase_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo_mask.png' )
lowerCamelCase_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy' )
# using the PNDM scheduler by default
lowerCamelCase_ = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='onnx' , safety_checker=A_ , feature_extractor=A_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=A_ )
lowerCamelCase_ = 'A red cat sitting on a park bench'
lowerCamelCase_ = np.random.RandomState(0 )
lowerCamelCase_ = pipe(
prompt=A_ , image=A_ , mask_image=A_ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 , generator=A_ , output_type='np' , )
lowerCamelCase_ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1E-2
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}
class GPTNeoXJapaneseConfig(PretrainedConfig):
    """Configuration class for GPTNeoXJapanese models."""

    model_type = 'gpt_neox_japanese'

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act='gelu',
        rotary_pct=1.00,
        rotary_emb_base=10000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31996,
        eos_token_id=31999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
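# Hedged usage sketch: the defaults above describe the released 2.7b model; a
# toy configuration for tests is just keyword overrides.
# tiny = GPTNeoXJapaneseConfig(vocab_size=512, hidden_size=64,
#                              num_hidden_layers=2, num_attention_heads=2)
# assert tiny.intermediate_multiple_size == 4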
import string
from math import log10
def term_frequency(term: str, document: str) -> int:
    """Count how many times `term` occurs in `document` (case-insensitive)."""
    document_without_punctuation = document.translate(
        str.maketrans('', '', string.punctuation)).replace('\n', '')
    tokenize_document = document_without_punctuation.split(' ')  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])
def document_frequency(term: str, corpus: str):
    """Return (number of documents containing `term`, total number of documents)."""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans('', '', string.punctuation))  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split('\n')
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))
def inverse_document_frequency(df: int, n: int, smoothing=False) -> float:
    """idf = log10(n / df); with smoothing, 1 + log10(n / (1 + df))."""
    if smoothing:
        if n == 0:
            raise ValueError('log10(0) is undefined.')
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError('df must be > 0')
    elif n == 0:
        raise ValueError('log10(0) is undefined.')
    return round(log10(n / df), 3)
def tf_idf(tf: int, idf: int) -> float:
    """Combine term frequency and inverse document frequency."""
    return round(tf * idf, 3)
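# Worked example (hedged: the toy corpus below is invented for illustration):
if __name__ == "__main__":
    corpus = "the cat sat on the mat\nthe dog sat on the log\ncats and dogs"
    tf = term_frequency("sat", corpus.split("\n")[0])  # -> 1
    df, n = document_frequency("sat", corpus)          # -> (2, 3)
    idf = inverse_document_frequency(df, n)            # -> round(log10(3 / 2), 3) = 0.176
    print(tf_idf(tf, idf))                             # -> 0.176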
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
"text-classification",
"language-modeling",
"summarization",
"token-classification",
"question-answering",
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument('-f')
    args = parser.parse_args()
    return args.f
def get_results(output_dir, split="eval"):
    path = os.path.join(output_dir, f"{split}_results.json")
    if os.path.exists(path):
        with open(path, 'r') as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class A( UpperCamelCase ):
'''simple docstring'''
def a__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = self.get_auto_remove_tmp_dir()
lowerCamelCase_ = f"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
with patch.object(A_ , 'argv' , A_ ):
run_flax_glue.main()
lowerCamelCase_ = get_results(A_ )
self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )
@slow
def a__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = self.get_auto_remove_tmp_dir()
lowerCamelCase_ = f"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(A_ , 'argv' , A_ ):
run_clm_flax.main()
lowerCamelCase_ = get_results(A_ )
self.assertLess(result['eval_perplexity'] , 100 )
@slow
def a__ ( self : str ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = self.get_auto_remove_tmp_dir()
lowerCamelCase_ = f"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
        with patch.object(sys , 'argv' , testargs ):
            run_summarization_flax.main()
            result = get_results(tmp_dir , split='test' )
self.assertGreaterEqual(result['test_rouge1'] , 10 )
self.assertGreaterEqual(result['test_rouge2'] , 2 )
self.assertGreaterEqual(result['test_rougeL'] , 7 )
self.assertGreaterEqual(result['test_rougeLsum'] , 7 )
@slow
def a__ ( self : Optional[int] ) -> str:
"""simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
        with patch.object(sys , 'argv' , testargs ):
            run_mlm_flax.main()
            result = get_results(tmp_dir )
self.assertLess(result['eval_perplexity'] , 42 )
@slow
def a__ ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
        with patch.object(sys , 'argv' , testargs ):
            run_t5_mlm_flax.main()
            result = get_results(tmp_dir )
self.assertGreaterEqual(result['eval_accuracy'] , 0.42 )
@slow
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
        with patch.object(sys , 'argv' , testargs ):
            run_flax_ner.main()
            result = get_results(tmp_dir )
self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )
self.assertGreaterEqual(result['eval_f1'] , 0.3 )
@slow
def a__ ( self : str ) -> int:
"""simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
        with patch.object(sys , 'argv' , testargs ):
            run_qa.main()
            result = get_results(tmp_dir )
self.assertGreaterEqual(result['eval_f1'] , 30 )
self.assertGreaterEqual(result['eval_exact'] , 30 )
| 651
| 1
|
import unittest
from transformers.testing_utils import require_bsa
from transformers.utils import is_bsa_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bsa_available():
from transformers import MarkupLMFeatureExtractor
class MarkupLMFeatureExtractionTester( unittest.TestCase ):
'''simple docstring'''
def __init__( self : str , A_ : List[str] ) -> str:
"""simple docstring"""
lowerCamelCase_ = parent
def a__ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
return {}
def get_html_strings( ):
'''simple docstring'''
    html_string_a = '<HTML>\n\n <HEAD>\n <TITLE>sample document</TITLE>\n </HEAD>\n\n <BODY BGCOLOR="FFFFFF">\n <HR>\n <a href="http://google.com">Goog</a>\n <H1>This is one header</H1>\n <H2>This is a another Header</H2>\n <P>Travel from\n <P>\n <B>SFO to JFK</B>\n <BR>\n <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n <HR>\n <div style="color:#0000FF">\n <h3>Traveler <b> name </b> is\n <p> John Doe </p>\n </div>'
    html_string_b = '\n <!DOCTYPE html>\n <html>\n <body>\n\n <h1>My First Heading</h1>\n <p>My first paragraph.</p>\n\n </body>\n </html>\n '
    return [html_string_a, html_string_b]
@require_bsa
class A( FeatureExtractionSavingTestMixin , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = MarkupLMFeatureExtractor if is_bsa_available() else None
def a__ ( self : str ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = MarkupLMFeatureExtractionTester(self )
@property
def a__ ( self : Dict ) -> int:
"""simple docstring"""
return self.feature_extract_tester.prepare_feat_extract_dict()
def a__ ( self : str ) -> str:
"""simple docstring"""
lowerCamelCase_ = self.feature_extraction_class()
        # Test non-batched input
lowerCamelCase_ = get_html_strings()[0]
lowerCamelCase_ = feature_extractor(A_ )
# fmt: off
lowerCamelCase_ = [['sample document', 'Goog', 'This is one header', 'This is a another Header', 'Travel from', 'SFO to JFK', 'on May 2, 2015 at 2:00 pm. For details go to confirm.com', 'Traveler', 'name', 'is', 'John Doe']]
lowerCamelCase_ = [['/html/head/title', '/html/body/a', '/html/body/h1', '/html/body/h2', '/html/body/p', '/html/body/p/p/b[1]', '/html/body/p/p/b[2]/i', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/b', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/p']]
# fmt: on
self.assertEqual(encoding.nodes , A_ )
self.assertEqual(encoding.xpaths , A_ )
# Test batched
lowerCamelCase_ = get_html_strings()
lowerCamelCase_ = feature_extractor(A_ )
# fmt: off
lowerCamelCase_ = expected_nodes + [['My First Heading', 'My first paragraph.']]
lowerCamelCase_ = expected_xpaths + [['/html/body/h1', '/html/body/p']]
self.assertEqual(len(encoding.nodes ) , 2 )
self.assertEqual(len(encoding.xpaths ) , 2 )
self.assertEqual(encoding.nodes , A_ )
self.assertEqual(encoding.xpaths , A_ )
| 651
|
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    """A binary tree node holding `data` coins."""
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None
CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")
def _SCREAMING_SNAKE_CASE ( root : TreeNode | None ) -> int:
    """Return the minimum number of moves needed to give every node exactly one coin."""
    if root is None:
        return 0
    # Validation
    def count_nodes(node : TreeNode | None ) -> int:
        if node is None:
            return 0
        return count_nodes(node.left ) + count_nodes(node.right ) + 1
    def count_coins(node : TreeNode | None ) -> int:
        if node is None:
            return 0
        return count_coins(node.left ) + count_coins(node.right ) + node.data
    if count_nodes(root ) != count_coins(root ):
        raise ValueError('The number of nodes must equal the number of coins' )
    # Main calculation
    def get_distrib(node : TreeNode | None ) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0 , 1 )
        left_distrib_moves , left_distrib_excess = get_distrib(node.left )
        right_distrib_moves , right_distrib_excess = get_distrib(node.right )
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        # Moves for this subtree: moves below plus coins crossing this node's two edges.
        coins_change = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left )
            + abs(coins_to_right )
        )
        # Excess coins this subtree passes up to its parent.
        distrib_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(coins_change , distrib_excess )
    return get_distrib(root )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 651
| 1
|
import qiskit
def single_qubit_measure( qubits : int , classical_bits : int ):
    """Apply an X gate to both qubits and return the measurement counts."""
    simulator = qiskit.Aer.get_backend('aer_simulator' )
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits , classical_bits )
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0 )
    circuit.x(1 )
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1] , [0, 1] )
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit , simulator , shots=1000 )
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit )
if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
print(F"""Total count for various states are: {counts}""")
| 651
|
from manim import *
class A( Scene ):
'''simple docstring'''
def a__ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = Rectangle(height=0.5 , width=0.5 )
lowerCamelCase_ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
lowerCamelCase_ = Rectangle(height=0.25 , width=0.25 )
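        # Prototype blocks: a memory cell, a slightly smaller fill, and a small cell reused for the disk rows.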
lowerCamelCase_ = [mem.copy() for i in range(6 )]
lowerCamelCase_ = [mem.copy() for i in range(6 )]
lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = VGroup(A_ , A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = Text('CPU' , font_size=24 )
lowerCamelCase_ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(A_ )
lowerCamelCase_ = [mem.copy() for i in range(4 )]
lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = Text('GPU' , font_size=24 )
lowerCamelCase_ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
gpu.move_to([-1, -1, 0] )
self.add(A_ )
lowerCamelCase_ = [mem.copy() for i in range(6 )]
lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = Text('Model' , font_size=24 )
lowerCamelCase_ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
model.move_to([3, -1.0, 0] )
self.add(A_ )
lowerCamelCase_ = []
lowerCamelCase_ = []
for i, rect in enumerate(A_ ):
lowerCamelCase_ = fill.copy().set_fill(A_ , opacity=0.8 )
target.move_to(A_ )
model_arr.append(A_ )
lowerCamelCase_ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(A_ , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(A_ )
self.add(*A_ , *A_ )
lowerCamelCase_ = [meta_mem.copy() for i in range(6 )]
lowerCamelCase_ = [meta_mem.copy() for i in range(6 )]
lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = VGroup(A_ , A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = Text('Disk' , font_size=24 )
lowerCamelCase_ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
disk.move_to([-4, -1.25, 0] )
self.add(A_ , A_ )
lowerCamelCase_ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCamelCase_ = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(A_ , A_ )
lowerCamelCase_ = MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(A_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(A_ )
lowerCamelCase_ = MarkupText(
f"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(A_ ) )
lowerCamelCase_ = Square(0.3 )
input.set_fill(A_ , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , A_ , buff=0.5 )
self.play(Write(A_ ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=A_ , buff=0.02 )
self.play(MoveToTarget(A_ ) )
self.play(FadeOut(A_ ) )
lowerCamelCase_ = Arrow(start=A_ , end=A_ , color=A_ , buff=0.5 )
a.next_to(model_arr[0].get_left() , A_ , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
lowerCamelCase_ = MarkupText(
f"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(A_ , run_time=3 ) )
lowerCamelCase_ = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
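        # Shared keyword arguments for the Circumscribe highlight animations below.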
self.play(
Write(A_ ) , Circumscribe(model_arr[0] , color=A_ , **A_ ) , Circumscribe(model_cpu_arr[0] , color=A_ , **A_ ) , Circumscribe(gpu_rect[0] , color=A_ , **A_ ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
lowerCamelCase_ = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , A_ , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
lowerCamelCase_ = AnimationGroup(
FadeOut(A_ , run_time=0.5 ) , MoveToTarget(A_ , run_time=0.5 ) , FadeIn(A_ , run_time=0.5 ) , lag_ratio=0.2 )
self.play(A_ )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
lowerCamelCase_ = 0.7
self.play(
Circumscribe(model_arr[i] , **A_ ) , Circumscribe(cpu_left_col_base[i] , **A_ ) , Circumscribe(cpu_left_col_base[i + 1] , color=A_ , **A_ ) , Circumscribe(gpu_rect[0] , color=A_ , **A_ ) , Circumscribe(model_arr[i + 1] , color=A_ , **A_ ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=A_ , **A_ ) , Circumscribe(cpu_left_col_base[-1] , color=A_ , **A_ ) , Circumscribe(gpu_rect[0] , color=A_ , **A_ ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
lowerCamelCase_ = a_c
lowerCamelCase_ = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(A_ ) , FadeOut(A_ , run_time=0.5 ) , )
lowerCamelCase_ = MarkupText(f"""Inference on a model too large for GPU memory\nis successfully completed.""" , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(A_ , run_time=3 ) , MoveToTarget(A_ ) )
self.wait()
| 651
| 1
|
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess( image , w , h ):
    '''simple docstring'''
    # Accepts a tensor, a single PIL image, or a list of either, and returns an
    # NCHW float tensor with pixel values rescaled to [-1, 1].
    if isinstance(image , torch.Tensor ):
        return image
    elif isinstance(image , PIL.Image.Image ):
        image = [image]
    if isinstance(image[0] , PIL.Image.Image ):
        image = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image]
        image = np.concatenate(image , axis=0 )
        image = np.array(image ).astype(np.float32 ) / 255.0
        image = image.transpose(0 , 3 , 1 , 2 )
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image )
    elif isinstance(image[0] , torch.Tensor ):
        image = torch.cat(image , dim=0 )
    return image
def slerp( t , va , va_target , DOT_THRESHOLD=0.9995 ):
    '''simple docstring'''
    # Spherical linear interpolation between two vectors; falls back to plain
    # lerp when they are nearly collinear (|dot| > DOT_THRESHOLD).
    inputs_are_torch = False
    if not isinstance(va , np.ndarray ):
        inputs_are_torch = True
        input_device = va.device
        va = va.cpu().numpy()
        va_target = va_target.cpu().numpy()
    dot = np.sum(va * va_target / (np.linalg.norm(va ) * np.linalg.norm(va_target )) )
    if np.abs(dot ) > DOT_THRESHOLD:
        result = (1 - t) * va + t * va_target
    else:
        theta_a = np.arccos(dot )
        sin_theta_a = np.sin(theta_a )
        theta_t = theta_a * t
        sin_theta_t = np.sin(theta_t )
        sa = np.sin(theta_a - theta_t ) / sin_theta_a
        sb = sin_theta_t / sin_theta_a
        result = sa * va + sb * va_target
    if inputs_are_torch:
        result = torch.from_numpy(result ).to(input_device )
    return result
def spherical_dist_loss( x , y ):
    '''simple docstring'''
    # Squared geodesic distance between L2-normalized embeddings, used as the CLIP guidance loss.
    x = F.normalize(x , dim=-1 )
    y = F.normalize(y , dim=-1 )
    return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
def set_requires_grad( model , value ):
    '''simple docstring'''
    # Freeze/unfreeze helper: toggles gradient tracking for every parameter of a module.
    for param in model.parameters():
        param.requires_grad = value
class A( DiffusionPipeline ):
'''simple docstring'''
def __init__( self : Tuple , A_ : AutoencoderKL , A_ : CLIPTextModel , A_ : CLIPModel , A_ : CLIPTokenizer , A_ : UNetaDConditionModel , A_ : Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler] , A_ : CLIPFeatureExtractor , A_ : Optional[int]=None , A_ : Tuple=None , A_ : Any=None , ) -> List[Any]:
"""simple docstring"""
super().__init__()
self.register_modules(
vae=A_ , text_encoder=A_ , clip_model=A_ , tokenizer=A_ , unet=A_ , scheduler=A_ , feature_extractor=A_ , coca_model=A_ , coca_tokenizer=A_ , coca_transform=A_ , )
lowerCamelCase_ = (
feature_extractor.size
if isinstance(feature_extractor.size , A_ )
else feature_extractor.size['shortest_edge']
)
lowerCamelCase_ = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
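        # Normalization matching the CLIP feature extractor's mean/std, applied to
        # decoded images before they are scored by the CLIP model.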
set_requires_grad(self.text_encoder , A_ )
set_requires_grad(self.clip_model , A_ )
def a__ ( self : Union[str, Any] , A_ : Optional[Union[str, int]] = "auto" ) -> str:
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowerCamelCase_ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(A_ )
def a__ ( self : List[Any] ) -> str:
"""simple docstring"""
self.enable_attention_slicing(A_ )
def a__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
set_requires_grad(self.vae , A_ )
def a__ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
set_requires_grad(self.vae , A_ )
def a__ ( self : Any ) -> Tuple:
"""simple docstring"""
set_requires_grad(self.unet , A_ )
def a__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
set_requires_grad(self.unet , A_ )
def a__ ( self : List[Any] , A_ : Any , A_ : int , A_ : Dict ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = min(int(num_inference_steps * strength ) , A_ )
lowerCamelCase_ = max(num_inference_steps - init_timestep , 0 )
lowerCamelCase_ = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def a__ ( self : Optional[int] , A_ : Tuple , A_ : Tuple , A_ : Dict , A_ : List[Any] , A_ : Dict , A_ : Dict=None ) -> Optional[int]:
"""simple docstring"""
if not isinstance(A_ , torch.Tensor ):
raise ValueError(f"""`image` has to be of type `torch.Tensor` but is {type(A_ )}""" )
lowerCamelCase_ = image.to(device=A_ , dtype=A_ )
if isinstance(A_ , A_ ):
lowerCamelCase_ = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(A_ )
]
lowerCamelCase_ = torch.cat(A_ , dim=0 )
else:
lowerCamelCase_ = self.vae.encode(A_ ).latent_dist.sample(A_ )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
lowerCamelCase_ = 0.18215 * init_latents
lowerCamelCase_ = init_latents.repeat_interleave(A_ , dim=0 )
lowerCamelCase_ = randn_tensor(init_latents.shape , generator=A_ , device=A_ , dtype=A_ )
# get latents
lowerCamelCase_ = self.scheduler.add_noise(A_ , A_ , A_ )
lowerCamelCase_ = init_latents
return latents
def a__ ( self : List[str] , A_ : Optional[int] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = self.coca_transform(A_ ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
lowerCamelCase_ = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
lowerCamelCase_ = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split('<end_of_text>' )[0].replace('<start_of_text>' , '' ).rstrip(' .,' )
def a__ ( self : Union[str, Any] , A_ : str , A_ : Union[str, Any] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = self.feature_extractor.preprocess(A_ )
lowerCamelCase_ = torch.from_numpy(clip_image_input['pixel_values'][0] ).unsqueeze(0 ).to(self.device ).half()
lowerCamelCase_ = self.clip_model.get_image_features(A_ )
lowerCamelCase_ = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=A_ )
lowerCamelCase_ = image_embeddings_clip.repeat_interleave(A_ , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def a__ ( self : Optional[Any] , A_ : List[Any] , A_ : Dict , A_ : List[str] , A_ : Tuple , A_ : Dict , A_ : int , A_ : Tuple , ) -> str:
"""simple docstring"""
lowerCamelCase_ = latents.detach().requires_grad_()
lowerCamelCase_ = self.scheduler.scale_model_input(A_ , A_ )
# predict the noise residual
lowerCamelCase_ = self.unet(A_ , A_ , encoder_hidden_states=A_ ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
lowerCamelCase_ = self.scheduler.alphas_cumprod[timestep]
lowerCamelCase_ = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
lowerCamelCase_ = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
lowerCamelCase_ = torch.sqrt(A_ )
lowerCamelCase_ = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , A_ ):
lowerCamelCase_ = self.scheduler.sigmas[index]
lowerCamelCase_ = latents - sigma * noise_pred
else:
raise ValueError(f"""scheduler type {type(self.scheduler )} not supported""" )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
lowerCamelCase_ = 1 / 0.18215 * sample
lowerCamelCase_ = self.vae.decode(A_ ).sample
lowerCamelCase_ = (image / 2 + 0.5).clamp(0 , 1 )
lowerCamelCase_ = transforms.Resize(self.feature_extractor_size )(A_ )
lowerCamelCase_ = self.normalize(A_ ).to(latents.dtype )
lowerCamelCase_ = self.clip_model.get_image_features(A_ )
lowerCamelCase_ = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=A_ )
lowerCamelCase_ = spherical_dist_loss(A_ , A_ ).mean() * clip_guidance_scale
lowerCamelCase_ = -torch.autograd.grad(A_ , A_ )[0]
if isinstance(self.scheduler , A_ ):
lowerCamelCase_ = latents.detach() + grads * (sigma**2)
lowerCamelCase_ = noise_pred_original
else:
lowerCamelCase_ = noise_pred_original - torch.sqrt(A_ ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self : int , A_ : Union[torch.FloatTensor, PIL.Image.Image] , A_ : Union[torch.FloatTensor, PIL.Image.Image] , A_ : Optional[str] = None , A_ : Optional[str] = None , A_ : Optional[int] = 512 , A_ : Optional[int] = 512 , A_ : float = 0.6 , A_ : Optional[int] = 50 , A_ : Optional[float] = 7.5 , A_ : Optional[int] = 1 , A_ : float = 0.0 , A_ : Optional[float] = 100 , A_ : Optional[torch.Generator] = None , A_ : Optional[str] = "pil" , A_ : bool = True , A_ : float = 0.8 , A_ : float = 0.1 , A_ : float = 0.1 , ) -> int:
"""simple docstring"""
if isinstance(A_ , A_ ) and len(A_ ) != batch_size:
raise ValueError(f"""You have passed {batch_size} batch_size, but only {len(A_ )} generators.""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if isinstance(A_ , torch.Generator ) and batch_size > 1:
lowerCamelCase_ = [generator] + [None] * (batch_size - 1)
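            # Only the first sample keeps the user's seed; the rest of the batch uses fresh RNG state.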
lowerCamelCase_ = [
('model', self.coca_model is None),
('tokenizer', self.coca_tokenizer is None),
('transform', self.coca_transform is None),
]
lowerCamelCase_ = [x[0] for x in coca_is_none if x[1]]
lowerCamelCase_ = ', '.join(A_ )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(A_ ):
raise ValueError(
f"""Content prompt is None and CoCa [{coca_is_none_str}] is None."""
f"""Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
lowerCamelCase_ = self.get_image_description(A_ )
if style_prompt is None:
if len(A_ ):
raise ValueError(
f"""Style prompt is None and CoCa [{coca_is_none_str}] is None."""
f""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
lowerCamelCase_ = self.get_image_description(A_ )
# get prompt text embeddings for content and style
lowerCamelCase_ = self.tokenizer(
A_ , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=A_ , return_tensors='pt' , )
lowerCamelCase_ = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
lowerCamelCase_ = self.tokenizer(
A_ , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=A_ , return_tensors='pt' , )
lowerCamelCase_ = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
lowerCamelCase_ = slerp(A_ , A_ , A_ )
# duplicate text embeddings for each generation per prompt
lowerCamelCase_ = text_embeddings.repeat_interleave(A_ , dim=0 )
# set timesteps
lowerCamelCase_ = 'offset' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
lowerCamelCase_ = {}
if accepts_offset:
lowerCamelCase_ = 1
self.scheduler.set_timesteps(A_ , **A_ )
# Some schedulers like PNDM have timesteps as arrays
        # It's more efficient to move all timesteps to the correct device beforehand
self.scheduler.timesteps.to(self.device )
lowerCamelCase_ , lowerCamelCase_ = self.get_timesteps(A_ , A_ , self.device )
lowerCamelCase_ = timesteps[:1].repeat(A_ )
# Preprocess image
lowerCamelCase_ = preprocess(A_ , A_ , A_ )
lowerCamelCase_ = self.prepare_latents(
A_ , A_ , A_ , text_embeddings.dtype , self.device , A_ )
lowerCamelCase_ = preprocess(A_ , A_ , A_ )
lowerCamelCase_ = self.prepare_latents(
A_ , A_ , A_ , text_embeddings.dtype , self.device , A_ )
lowerCamelCase_ = slerp(A_ , A_ , A_ )
if clip_guidance_scale > 0:
lowerCamelCase_ = self.get_clip_image_embeddings(A_ , A_ )
lowerCamelCase_ = self.get_clip_image_embeddings(A_ , A_ )
lowerCamelCase_ = slerp(
A_ , A_ , A_ )
        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowerCamelCase_ = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowerCamelCase_ = content_text_input.input_ids.shape[-1]
lowerCamelCase_ = self.tokenizer([''] , padding='max_length' , max_length=A_ , return_tensors='pt' )
lowerCamelCase_ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
lowerCamelCase_ = uncond_embeddings.repeat_interleave(A_ , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCamelCase_ = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowerCamelCase_ = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
lowerCamelCase_ = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
lowerCamelCase_ = torch.randn(A_ , generator=A_ , device='cpu' , dtype=A_ ).to(
self.device )
else:
lowerCamelCase_ = torch.randn(A_ , generator=A_ , device=self.device , dtype=A_ )
else:
if latents.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
lowerCamelCase_ = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowerCamelCase_ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowerCamelCase_ = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowerCamelCase_ = {}
if accepts_eta:
lowerCamelCase_ = eta
# check if the scheduler accepts generator
lowerCamelCase_ = 'generator' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
lowerCamelCase_ = generator
with self.progress_bar(total=A_ ):
for i, t in enumerate(A_ ):
# expand the latents if we are doing classifier free guidance
lowerCamelCase_ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCamelCase_ = self.scheduler.scale_model_input(A_ , A_ )
# predict the noise residual
lowerCamelCase_ = self.unet(A_ , A_ , encoder_hidden_states=A_ ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
lowerCamelCase_ , lowerCamelCase_ = noise_pred.chunk(2 )
lowerCamelCase_ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
lowerCamelCase_ = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
lowerCamelCase_ , lowerCamelCase_ = self.cond_fn(
A_ , A_ , A_ , A_ , A_ , A_ , A_ , )
# compute the previous noisy sample x_t -> x_t-1
lowerCamelCase_ = self.scheduler.step(A_ , A_ , A_ , **A_ ).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
lowerCamelCase_ = 1 / 0.18215 * latents
lowerCamelCase_ = self.vae.decode(A_ ).sample
lowerCamelCase_ = (image / 2 + 0.5).clamp(0 , 1 )
lowerCamelCase_ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowerCamelCase_ = self.numpy_to_pil(A_ )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=A_ , nsfw_content_detected=A_ )
| 651
|
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def _SCREAMING_SNAKE_CASE ( lowercase : Optional[Any] ):
'''simple docstring'''
return EnvironmentCommand()
class EnvironmentCommand( BaseDiffusersCLICommand ):
'''simple docstring'''
@staticmethod
def a__ ( A_ : ArgumentParser ) -> str:
"""simple docstring"""
lowerCamelCase_ = parser.add_parser('env' )
download_parser.set_defaults(func=A_ )
def a__ ( self : Optional[Any] ) -> Any:
"""simple docstring"""
lowerCamelCase_ = huggingface_hub.__version__
lowerCamelCase_ = 'not installed'
lowerCamelCase_ = 'NA'
if is_torch_available():
import torch
lowerCamelCase_ = torch.__version__
lowerCamelCase_ = torch.cuda.is_available()
lowerCamelCase_ = 'not installed'
if is_transformers_available():
import transformers
lowerCamelCase_ = transformers.__version__
lowerCamelCase_ = 'not installed'
if is_accelerate_available():
import accelerate
lowerCamelCase_ = accelerate.__version__
lowerCamelCase_ = 'not installed'
if is_xformers_available():
import xformers
lowerCamelCase_ = xformers.__version__
lowerCamelCase_ = {
'`diffusers` version': version,
'Platform': platform.platform(),
'Python version': platform.python_version(),
'PyTorch version (GPU?)': f"""{pt_version} ({pt_cuda_available})""",
'Huggingface_hub version': hub_version,
'Transformers version': transformers_version,
'Accelerate version': accelerate_version,
'xFormers version': xformers_version,
'Using GPU in script?': '<fill in>',
'Using distributed or parallel set-up in script?': '<fill in>',
}
print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n' )
print(self.format_dict(A_ ) )
return info
@staticmethod
def a__ ( A_ : Dict ) -> Any:
"""simple docstring"""
return "\n".join([f"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
| 651
| 1
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True )
class A( TaskTemplate ):
'''simple docstring'''
UpperCamelCase = field(default='''audio-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
UpperCamelCase = Features({'''audio''': Audio()} )
UpperCamelCase = Features({'''labels''': ClassLabel} )
UpperCamelCase = "audio"
UpperCamelCase = "labels"
def a__ ( self : Optional[int] , A_ : int ) -> List[Any]:
"""simple docstring"""
if self.label_column not in features:
raise ValueError(f"""Column {self.label_column} is not present in features.""" )
if not isinstance(features[self.label_column] , A_ ):
raise ValueError(f"""Column {self.label_column} is not a ClassLabel.""" )
lowerCamelCase_ = copy.deepcopy(self )
lowerCamelCase_ = self.label_schema.copy()
lowerCamelCase_ = features[self.label_column]
lowerCamelCase_ = label_schema
return task_template
@property
def a__ ( self : Any ) -> Dict[str, str]:
"""simple docstring"""
return {
self.audio_column: "audio",
self.label_column: "labels",
}
| 651
|
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling( num : int , den : int ) -> bool:
    """True if naively cancelling the shared digit leaves the fraction's value unchanged."""
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )
def fraction_list( digit_len : int ) -> list[str]:
    """Collect the non-trivial digit-cancelling fractions with terms of `digit_len` digits."""
    solutions = []
    den = 11
    last_digit = int('1' + '0' * digit_len )
    for num in range(den , last_digit ):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num , den ):
                    solutions.append(f"""{num}/{den}""" )
            den += 1
        num += 1
        den = 10
    return solutions
def solution( n : int = 2 ) -> int:
    """Project Euler 33: denominator of the product of the four fractions, in lowest terms."""
    result = 1.0
    for fraction in fraction_list(n ):
        frac = Fraction(fraction )
        result *= frac.denominator / frac.numerator
    return int(result )
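# Sanity check: fraction_list(2) yields 16/64, 19/95, 26/65 and 49/98,
# whose product is 1/100, so solution() returns 100.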
if __name__ == "__main__":
print(solution())
| 651
| 1
|
def _SCREAMING_SNAKE_CASE ( input_str : str , use_pascal : bool = False ):
    """Convert a snake_case string to camelCase (or PascalCase when use_pascal=True)."""
    if not isinstance(input_str , str ):
        msg = f"""Expected string as input, found {type(input_str )}"""
        raise ValueError(msg )
    if not isinstance(use_pascal , bool ):
        msg = f"""Expected boolean as use_pascal parameter, found {type(use_pascal )}"""
        raise ValueError(msg )
    words = input_str.split('_' )
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = '' if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 651
|
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowerCamelCase : List[Any] = logging.get_logger(__name__)
class A( BaseImageProcessor ):
'''simple docstring'''
UpperCamelCase = ['''pixel_values''']
def __init__( self : List[Any] , A_ : bool = True , A_ : Dict[str, int] = None , A_ : PILImageResampling = PILImageResampling.BICUBIC , A_ : bool = True , A_ : Dict[str, int] = None , A_ : bool = True , A_ : Union[int, float] = 1 / 255 , A_ : bool = True , A_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , A_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **A_ : Tuple , ) -> None:
"""simple docstring"""
super().__init__(**A_ )
lowerCamelCase_ = size if size is not None else {'shortest_edge': 224}
lowerCamelCase_ = get_size_dict(A_ , default_to_square=A_ )
lowerCamelCase_ = crop_size if crop_size is not None else {'height': 224, 'width': 224}
lowerCamelCase_ = get_size_dict(A_ , param_name='crop_size' )
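        # `size` and `crop_size` may arrive as bare ints or partial dicts; both are
        # normalized here to the canonical {"height", "width"} / {"shortest_edge"} forms.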
lowerCamelCase_ = do_resize
lowerCamelCase_ = size
lowerCamelCase_ = resample
lowerCamelCase_ = do_center_crop
lowerCamelCase_ = crop_size
lowerCamelCase_ = do_rescale
lowerCamelCase_ = rescale_factor
lowerCamelCase_ = do_normalize
lowerCamelCase_ = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
lowerCamelCase_ = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def a__ ( self : Optional[Any] , A_ : np.ndarray , A_ : Dict[str, int] , A_ : PILImageResampling = PILImageResampling.BICUBIC , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : Tuple , ) -> np.ndarray:
"""simple docstring"""
lowerCamelCase_ = get_size_dict(A_ , default_to_square=A_ )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
lowerCamelCase_ = int((256 / 224) * size['shortest_edge'] )
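            # 256/224 is the conventional resize-to-crop ratio for ImageNet-style preprocessing.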
lowerCamelCase_ = get_resize_output_image_size(A_ , size=A_ , default_to_square=A_ )
lowerCamelCase_ = {'height': output_size[0], 'width': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""" )
return resize(
A_ , size=(size_dict['height'], size_dict['width']) , resample=A_ , data_format=A_ , **A_ )
def a__ ( self : Any , A_ : np.ndarray , A_ : Dict[str, int] , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : Any , ) -> np.ndarray:
"""simple docstring"""
lowerCamelCase_ = get_size_dict(A_ )
if "height" not in size or "width" not in size:
raise ValueError(f"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(A_ , size=(size['height'], size['width']) , data_format=A_ , **A_ )
def a__ ( self : Optional[Any] , A_ : np.ndarray , A_ : Union[int, float] , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : Optional[int] , ) -> np.ndarray:
"""simple docstring"""
return rescale(A_ , scale=A_ , data_format=A_ , **A_ )
def a__ ( self : List[str] , A_ : np.ndarray , A_ : Union[float, List[float]] , A_ : Union[float, List[float]] , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : str , ) -> np.ndarray:
"""simple docstring"""
return normalize(A_ , mean=A_ , std=A_ , data_format=A_ , **A_ )
def a__ ( self : Optional[int] , A_ : ImageInput , A_ : Optional[bool] = None , A_ : Optional[Dict[str, int]] = None , A_ : PILImageResampling = None , A_ : Optional[bool] = None , A_ : Optional[Dict[str, int]] = None , A_ : Optional[bool] = None , A_ : Optional[float] = None , A_ : Optional[bool] = None , A_ : Optional[Union[float, Iterable[float]]] = None , A_ : Optional[Union[float, Iterable[float]]] = None , A_ : Optional[TensorType] = None , A_ : ChannelDimension = ChannelDimension.FIRST , **A_ : List[Any] , ) -> BatchFeature:
"""simple docstring"""
lowerCamelCase_ = do_resize if do_resize is not None else self.do_resize
lowerCamelCase_ = resample if resample is not None else self.resample
lowerCamelCase_ = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCamelCase_ = do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCamelCase_ = do_normalize if do_normalize is not None else self.do_normalize
lowerCamelCase_ = image_mean if image_mean is not None else self.image_mean
lowerCamelCase_ = image_std if image_std is not None else self.image_std
lowerCamelCase_ = size if size is not None else self.size
lowerCamelCase_ = get_size_dict(A_ , default_to_square=A_ )
lowerCamelCase_ = crop_size if crop_size is not None else self.crop_size
lowerCamelCase_ = get_size_dict(A_ , param_name='crop_size' )
lowerCamelCase_ = make_list_of_images(A_ )
if not valid_images(A_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
lowerCamelCase_ = [to_numpy_array(A_ ) for image in images]
if do_resize:
lowerCamelCase_ = [self.resize(A_ , A_ , A_ ) for image in images]
if do_center_crop:
lowerCamelCase_ = [self.center_crop(A_ , A_ ) for image in images]
if do_rescale:
lowerCamelCase_ = [self.rescale(A_ , A_ ) for image in images]
if do_normalize:
lowerCamelCase_ = [self.normalize(A_ , A_ , A_ ) for image in images]
lowerCamelCase_ = [to_channel_dimension_format(A_ , A_ ) for image in images]
lowerCamelCase_ = {'pixel_values': images}
return BatchFeature(data=A_ , tensor_type=A_ )
| 651
| 1
|
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
'''simple docstring'''
def __init__( self : List[str] , A_ : int , A_ : int=13 , A_ : List[Any]=7 , A_ : List[Any]=True , A_ : Union[str, Any]=True , A_ : Optional[Any]=True , A_ : str=True , A_ : Optional[int]=99 , A_ : List[Any]=64 , A_ : int=32 , A_ : List[str]=5 , A_ : Union[str, Any]=4 , A_ : List[Any]=37 , A_ : List[str]="gelu" , A_ : Dict=0.1 , A_ : List[Any]=0.1 , A_ : int=512 , A_ : Dict=16 , A_ : List[Any]=2 , A_ : Optional[Any]=0.02 , A_ : Dict=3 , A_ : Optional[int]=4 , A_ : str=None , ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = seq_length
lowerCamelCase_ = is_training
lowerCamelCase_ = use_input_mask
lowerCamelCase_ = use_token_type_ids
lowerCamelCase_ = use_labels
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = embedding_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = type_vocab_size
lowerCamelCase_ = type_sequence_label_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = num_labels
lowerCamelCase_ = num_choices
lowerCamelCase_ = scope
def a__ ( self : Any ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ = None
if self.use_input_mask:
lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ = None
if self.use_token_type_ids:
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ ( self : Any ) -> Tuple:
"""simple docstring"""
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A_ , initializer_range=self.initializer_range , )
def a__ ( self : List[Any] , A_ : int , A_ : str , A_ : Union[str, Any] , A_ : int , A_ : Union[str, Any] , A_ : Union[str, Any] , A_ : str ) -> int:
"""simple docstring"""
lowerCamelCase_ = MegatronBertModel(config=A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = model(A_ , attention_mask=A_ , token_type_ids=A_ )
lowerCamelCase_ = model(A_ , token_type_ids=A_ )
lowerCamelCase_ = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def a__ ( self : Any , A_ : Optional[Any] , A_ : Optional[Any] , A_ : List[str] , A_ : List[Any] , A_ : Optional[Any] , A_ : List[str] , A_ : Union[str, Any] ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = MegatronBertForMaskedLM(config=A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = model(A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self : Tuple , A_ : Dict , A_ : str , A_ : Optional[int] , A_ : Optional[int] , A_ : Optional[int] , A_ : Optional[Any] , A_ : str ) -> Any:
"""simple docstring"""
lowerCamelCase_ = MegatronBertForCausalLM(config=A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = model(A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self : Tuple , A_ : str , A_ : Any , A_ : Optional[int] , A_ : List[Any] , A_ : Optional[int] , A_ : Optional[Any] , A_ : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = MegatronBertForNextSentencePrediction(config=A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = model(
A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def a__ ( self : Tuple , A_ : Tuple , A_ : int , A_ : Optional[int] , A_ : List[Any] , A_ : List[str] , A_ : str , A_ : Tuple ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ = MegatronBertForPreTraining(config=A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = model(
A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ , next_sentence_label=A_ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def a__ ( self : List[Any] , A_ : str , A_ : Tuple , A_ : Union[str, Any] , A_ : int , A_ : Any , A_ : Dict , A_ : Union[str, Any] ) -> str:
"""simple docstring"""
lowerCamelCase_ = MegatronBertForQuestionAnswering(config=A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = model(
A_ , attention_mask=A_ , token_type_ids=A_ , start_positions=A_ , end_positions=A_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a__ ( self : List[str] , A_ : List[str] , A_ : Dict , A_ : str , A_ : Optional[int] , A_ : Dict , A_ : Dict , A_ : List[str] ) -> Any:
"""simple docstring"""
lowerCamelCase_ = self.num_labels
lowerCamelCase_ = MegatronBertForSequenceClassification(A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = model(A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self : Dict , A_ : Union[str, Any] , A_ : List[Any] , A_ : Dict , A_ : Dict , A_ : Any , A_ : Optional[int] , A_ : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ = self.num_labels
lowerCamelCase_ = MegatronBertForTokenClassification(config=A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = model(A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a__ ( self : List[Any] , A_ : Optional[int] , A_ : str , A_ : Any , A_ : Tuple , A_ : Optional[int] , A_ : List[str] , A_ : int ) -> int:
"""simple docstring"""
lowerCamelCase_ = self.num_choices
lowerCamelCase_ = MegatronBertForMultipleChoice(config=A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCamelCase_ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCamelCase_ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
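        # Tile inputs once per answer choice so each (example, choice) pair is scored as its own sequence.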
lowerCamelCase_ = model(
A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def a__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class A( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
UpperCamelCase = (
{
'''feature-extraction''': MegatronBertModel,
'''fill-mask''': MegatronBertForMaskedLM,
'''question-answering''': MegatronBertForQuestionAnswering,
'''text-classification''': MegatronBertForSequenceClassification,
'''text-generation''': MegatronBertForCausalLM,
'''token-classification''': MegatronBertForTokenClassification,
'''zero-shot''': MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase = True
# test_resize_embeddings = False
UpperCamelCase = False
def a__ ( self : Any , A_ : List[Any] , A_ : Any , A_ : Union[str, Any]=False ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = super()._prepare_for_class(A_ , A_ , return_labels=A_ )
if return_labels:
if model_class in get_values(A_ ):
lowerCamelCase_ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=A_ )
lowerCamelCase_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=A_ )
return inputs_dict
def a__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = MegatronBertModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=A_ , hidden_size=37 )
def a__ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
def a__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*A_ )
def a__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*A_ )
def a__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*A_ )
def a__ ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*A_ )
def a__ ( self : Any ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*A_ )
def a__ ( self : List[str] ) -> str:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*A_ )
def a__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*A_ )
def a__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*A_ )
def _long_tensor( tok_lst ):
    '''simple docstring'''
    return torch.tensor(
        tok_lst , dtype=torch.long , device=torch_device , )
lowerCamelCase : Union[str, Any] = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class A( unittest.TestCase ):
'''simple docstring'''
@slow
@unittest.skip('Model is not available.' )
def a__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = 'nvidia/megatron-bert-uncased-345m'
if "MYDIR" in os.environ:
lowerCamelCase_ = os.path.join(os.environ['MYDIR'] , A_ )
lowerCamelCase_ = MegatronBertModel.from_pretrained(A_ )
model.to(A_ )
model.half()
lowerCamelCase_ = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]] )
with torch.no_grad():
lowerCamelCase_ = model(A_ )[0]
lowerCamelCase_ = torch.Size((1, 9, 1024) )
self.assertEqual(output.shape , A_ )
lowerCamelCase_ = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
for ii in range(3 ):
for jj in range(3 ):
lowerCamelCase_ = output[0, ii, jj]
lowerCamelCase_ = expected[3 * ii + jj]
lowerCamelCase_ = 'ii={} jj={} a={} b={}'.format(A_ , A_ , A_ , A_ )
self.assertTrue(math.isclose(A_ , A_ , rel_tol=A_ , abs_tol=A_ ) , msg=A_ )
| 651
|
import cv2
import numpy as np
class HarrisCorner:
    def __init__( self , k : float , window_size : int ) -> None:
        """Harris corner detector with free parameter k (empirically 0.04 - 0.06)."""
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError('invalid k value' )
    def __str__( self ) -> str:
        return str(self.k )
    def detect( self , img_path : str ) -> tuple[cv2.Mat, list[list[int]]]:
        """Return the image with detected corners marked in red, plus the corner list."""
        img = cv2.imread(img_path , 0 )
        h , w = img.shape
        corner_list = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img , cv2.COLOR_GRAY2RGB )
        dy , dx = np.gradient(img )
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        offset = self.window_size // 2
        for y in range(offset , h - offset ):
            for x in range(offset , w - offset ):
                # Sum the structure-tensor entries over the local window.
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - self.k * (trace**2)
                # Corner response threshold; can be tuned.
                if r > 0.5:
                    corner_list.append([x, y, r] )
                    color_img.itemset((y, x, 0) , 0 )
                    color_img.itemset((y, x, 1) , 0 )
                    color_img.itemset((y, x, 2) , 255 )
        return color_img, corner_list
if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img , corner_list = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
| 651
| 1
|
def odd_even_sort( input_list : list ) -> list:
    """Brick sort: alternately bubble over even and odd index pairs until sorted."""
    is_sorted = False
    while is_sorted is False: # keep looping until a full pass performs no swaps
        is_sorted = True
        for i in range(0 , len(input_list ) - 1 , 2 ): # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i] , input_list[i + 1] = input_list[i + 1] , input_list[i]
                is_sorted = False
        for i in range(1 , len(input_list ) - 1 , 2 ): # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i] , input_list[i + 1] = input_list[i + 1] , input_list[i]
                is_sorted = False
    return input_list
if __name__ == "__main__":
print("Enter list to be sorted")
lowerCamelCase : Optional[int] = [int(x) for x in input().split()]
# inputing elements of the list in one line
lowerCamelCase : Tuple = odd_even_sort(input_list)
print("The sorted list is")
print(sorted_list)
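    # Illustrative self-test (added): the result must agree with sorted().
    assert odd_even_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]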
| 651
|
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a dict mapping every utf-8 byte to a printable unicode string,
    avoiding whitespace/control characters that break byte-level BPE.
    """
    bs = (
        list(range(ord('!'), ord('~') + 1)) + list(range(ord('¡'), ord('¬') + 1)) + list(range(ord('®'), ord('ÿ') + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
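

# Worked example (added; illustrative): the byte-level map sends every raw
# byte to a printable character -- the space byte 32, for instance, maps to
# chr(256 + 32) == 'Ġ' -- and get_pairs enumerates adjacent symbol bigrams:
#     bytes_to_unicode()[32]               -> 'Ġ'
#     get_pairs(('h', 'e', 'l', 'l', 'o')) -> {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')}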
class BlenderbotTokenizer(PreTrainedTokenizer):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ) -> None:
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            bpe_merges = merges_handle.read().split('\n')[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+')

    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text: str) -> List[str]:
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = ''.join(
                self.byte_encoder[b] for b in token.encode('utf-8')
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(' '))
        return bpe_tokens

    def _convert_token_to_id(self, token: str) -> int:
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        text = ''.join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        merge_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')
        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        ' Please check that the tokenizer is not corrupted!')
                    index = token_index
                writer.write(' '.join(bpe_tokens) + '\n')
                index += 1
        return vocab_file, merge_file

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text: str, is_split_into_words: bool = False, **kwargs) -> Tuple[str, dict]:
        add_prefix_space = kwargs.pop('add_prefix_space', self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = ' ' + text
        return (text, kwargs)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(' ' + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)
        full_string = ' '.join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""")
        return input_ids
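

# Minimal sketch (added; assumption-labeled, not part of the tokenizer): the
# greedy loop in `bpe` above repeatedly merges the lowest-ranked adjacent
# pair. With toy ranks {('l', 'o'): 0, ('lo', 'w'): 1}, "low" collapses to a
# single symbol in two merges.
def _toy_bpe(word: str, ranks: dict) -> str:
    symbols = list(word)
    while len(symbols) > 1:
        candidates = [(ranks.get((a, b), float('inf')), i) for i, (a, b) in enumerate(zip(symbols, symbols[1:]))]
        best_rank, i = min(candidates)
        if best_rank == float('inf'):
            break  # no adjacent pair is a known merge
        symbols[i : i + 2] = [symbols[i] + symbols[i + 1]]
    return ' '.join(symbols)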
| 651
| 1
|
from __future__ import annotations
__author__ = "Muhammad Umer Farooq"
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Muhammad Umer Farooq"
__email__ = "contact@muhammadumerfarooq.me"
__status__ = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser):
    def __init__(self, domain: str) -> None:
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        # Only anchor tags carry hyperlinks.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor a bare fragment, record it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)


def get_domain_name(url: str) -> str:
    """Keep only the registrable part of the host, e.g. 'github.com'."""
    return ".".join(get_sub_domain_name(url).split('.')[-2:])


def get_sub_domain_name(url: str) -> str:
    """Return the full network location of the url."""
    return parse.urlparse(url).netloc


def emails_from_url(url: str = "https://github.com") -> list:
    domain = get_domain_name(url)
    # Initialize the parser
    parser = Parser(url)
    try:
        # Open URL
        r = requests.get(url)
        # pass the raw HTML to the parser to get links
        parser.feed(r.text)
        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall('[a-zA-Z0-9]+@' + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)
    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)


if __name__ == "__main__":
    emails = emails_from_url("https://github.com")
    print(f"""{len(emails)} emails found:""")
    print("\n".join(sorted(emails)))
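

def _demo_email_regex() -> None:
    # Illustrative check (added; not part of the original script): the simple
    # pattern above matches an alphanumeric local part directly followed by
    # '@' and the bare domain.
    found = re.findall('[a-zA-Z0-9]+@' + "github.com", "contact admin@github.com today")
    assert found == ["admin@github.com"]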
| 651
|
__author__ = "Alexander Joslin"
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    operators = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub}
    operand_stack: Stack[int] = Stack()
    operator_stack: Stack[str] = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1: operands go onto the operand stack
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2: operators go onto the operator stack
            operator_stack.push(i)
        elif i == ")":
            # RULE 4: a ')' applies the top operator to the top two operands
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num2, num1)
            operand_stack.push(total)
    # RULE 5: the single value left on the operand stack is the result
    return operand_stack.peek()


if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
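    # Additional worked example (added; illustrative): operands apply as
    # num2 <operator> num1, so non-commutative operators come out correctly.
    print(f"""((6 / 3) - 1) = {dijkstras_two_stack_algorithm('((6 / 3) - 1)')}""")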
| 651
| 1
|
def naive_cut_rod_recursive(n: int, prices: list):
    """Exhaustive recursion over every first-cut length: O(2^n)."""
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revue = float('-inf')
    for i in range(1, n + 1):
        max_revue = max(
            max_revue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices))
    return max_revue


def top_down_cut_rod(n: int, prices: list):
    """Memoized recursion: each subproblem is solved once, O(n^2) total."""
    _enforce_args(n, prices)
    max_rev = [float('-inf') for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float('-inf')
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue, prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev), )
        max_rev[n] = max_revenue
        return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float('-inf') for _ in range(n + 1)]
    max_rev[0] = 0
    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i
    return max_rev[n]


def _enforce_args(n: int, prices: list):
    if n < 0:
        msg = f"""n must be greater than or equal to 0. Got n = {n}"""
        raise ValueError(msg)
    if n > len(prices):
        msg = (
            'Each integral piece of rod must have a corresponding price. '
            f"""Got n = {n} but length of prices = {len(prices)}"""
        )
        raise ValueError(msg)


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)
    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
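    # Extra worked example (added; illustrative): for the classic CLRS prices
    # [1, 5, 8, 9], a rod of length 4 is best cut into two length-2 pieces,
    # so every implementation must report a revenue of 5 + 5 = 10.
    assert bottom_up_cut_rod(4, [1, 5, 8, 9]) == 10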
| 651
|
def print_max_activities(start: list[int], finish: list[int]) -> None:
    """Greedy activity selection: assumes activities are sorted by finish
    time and picks each one whose start is >= the last selected finish."""
    n = len(finish)
    print('The following activities are selected:')
    # The first activity is always selected
    i = 0
    print(i, end=',')
    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=',')
            i = j


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
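    # Illustrative follow-up (added): the same greedy rule collected into a
    # list instead of printed; for this input it must select [0, 1, 3, 4].
    selected = [0]
    for j in range(len(finish)):
        if start[j] >= finish[selected[-1]]:
            selected.append(j)
    assert selected == [0, 1, 3, 4]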
| 651
| 1
|
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc
        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Returns the mean of the distribution."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Returns the variance of the distribution."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        """Returns the standard deviation of the distribution."""
        return self.variance.sqrt()


class ParameterProjection(nn.Module):
    def __init__(
        self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]
        return self.domain_map(*params_unbounded)


class LambdaLayer(nn.Module):
    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)


class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args):
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None, ) -> Distribution:
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        return ParameterProjection(
            in_features=in_features, args_dim=self.args_dim, domain_map=LambdaLayer(self.domain_map), )

    def domain_map(self, *args: torch.Tensor):
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        """Smooth map onto the positive orthant: (x + sqrt(x^2 + 4)) / 2."""
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0


class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)


class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)


class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    # Cannot scale with the affine transformation since a negative binomial
    # returns integers; instead the parameters themselves are scaled.
    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None) -> Distribution:
        total_count, logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits))
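

def _check_squareplus() -> None:
    # Illustrative check (added; not part of the upstream module): squareplus
    # is the smooth positivity map used by the domain_map methods above;
    # squareplus(0) == 1 and squareplus(x) approaches x for large positive x.
    x = torch.tensor([-10.0, 0.0, 10.0])
    y = DistributionOutput.squareplus(x)
    assert bool(torch.all(y > 0))
    assert bool(torch.isclose(y[1], torch.tensor(1.0)))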
| 651
|
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        hidden_sizes=[32, 64, 128],
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2"],
        out_indices=[1, 2],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return FocalNetConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, hidden_sizes=self.hidden_sizes, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, path_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, out_features=self.out_features, out_indices=self.out_indices, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = FocalNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size, 8, 8])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[:-1])
        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size * 2, 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = FocalNetForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))
        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class A( UpperCamelCase , UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
UpperCamelCase = (
{'''feature-extraction''': FocalNetModel, '''image-classification''': FocalNetForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
        self.model_tester = FocalNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FocalNetConfig, embed_dim=37, has_text_modality=False)
def a__ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__ ( self : Any ) -> Optional[int]:
"""simple docstring"""
return
def a__ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
def a__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
def a__ ( self : Dict ) -> int:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)
def a__ ( self : List[str] ) -> Any:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@unittest.skip(reason='FocalNet does not use inputs_embeds' )
def a__ ( self : int ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason='FocalNet does not use feedforward chunking' )
def a__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
pass
def a__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
def a__ ( self : Any ) -> Optional[int]:
"""simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester, 'expected_num_hidden_layers', len(self.model_tester.depths) + 1)
        self.assertEqual(len(hidden_states), expected_num_layers)
        # FocalNet has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim], )
        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)
        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]), [num_patches, self.model_tester.embed_dim], )
def a__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes[:-1]:
            inputs_dict['output_hidden_states'] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
def a__ ( self : List[str] ) -> Dict:
"""simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes[:-1]:
            inputs_dict['output_hidden_states'] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
@slow
def a__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FocalNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def a__ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=f"""Parameter {name} of model {model_class} seems not properly initialized""", )
@require_vision
@require_torch
class A( unittest.TestCase ):
'''simple docstring'''
@cached_property
def a__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
return AutoImageProcessor.from_pretrained('microsoft/focalnet-tiny' ) if is_vision_available() else None
@slow
def a__ ( self : Tuple ) -> Any:
"""simple docstring"""
        model = FocalNetForImageClassification.from_pretrained('microsoft/focalnet-tiny').to(torch_device)
        image_processor = self.default_image_processor
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))
        self.assertEqual(outputs.logits.argmax(dim=-1).item(), 281)
@require_torch
class A( UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (FocalNetBackbone,) if is_torch_available() else ()
UpperCamelCase = FocalNetConfig
UpperCamelCase = False
def a__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
        self.model_tester = FocalNetModelTester(self)
| 651
| 1
|
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
lowerCamelCase : Dict = logging.get_logger(__name__)
def rename_key(key):
    regex = r'\w+[.]\d+'
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, '_'.join(pat.split('.')))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PT weight names to corresponding Flax weight names and reshape tensor if necessary."""
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('scale',)
    if (
        any('norm' in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ('scale',)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ('scale',)
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ('embedding',)
        return renamed_pt_tuple_key, pt_tensor
    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('kernel',)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor
    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('kernel',)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('weight',)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('bias',)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensors to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))
    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split('.'))
        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
                    f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""")
        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
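

if __name__ == "__main__":
    # Worked example (added; illustrative): indexed PyTorch module paths are
    # rewritten with underscores so they line up with Flax parameter naming.
    assert rename_key("down_blocks.0.attentions.1.weight") == "down_blocks_0.attentions_1.weight"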
| 651
|
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class A( unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = MODEL_FOR_CAUSAL_LM_MAPPING
UpperCamelCase = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def a__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = pipeline(task='text-generation' , model='sshleifer/tiny-ctrl' , framework='pt' )
# Using `do_sample=False` to force deterministic output
lowerCamelCase_ = text_generator('This is a test' , do_sample=A_ )
self.assertEqual(
A_ , [
{
'generated_text': (
'This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'
' oscope. FiliFili@@'
)
}
] , )
lowerCamelCase_ = text_generator(['This is a test', 'This is a second test'] )
self.assertEqual(
A_ , [
[
{
'generated_text': (
'This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'
' oscope. FiliFili@@'
)
}
],
[
{
'generated_text': (
'This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'
' oscope. oscope. FiliFili@@'
)
}
],
] , )
lowerCamelCase_ = text_generator('This is a test' , do_sample=A_ , num_return_sequences=2 , return_tensors=A_ )
self.assertEqual(
A_ , [
{'generated_token_ids': ANY(A_ )},
{'generated_token_ids': ANY(A_ )},
] , )
lowerCamelCase_ = text_generator.model.config.eos_token_id
lowerCamelCase_ = '<pad>'
lowerCamelCase_ = text_generator(
['This is a test', 'This is a second test'] , do_sample=A_ , num_return_sequences=2 , batch_size=2 , return_tensors=A_ , )
self.assertEqual(
A_ , [
[
{'generated_token_ids': ANY(A_ )},
{'generated_token_ids': ANY(A_ )},
],
[
{'generated_token_ids': ANY(A_ )},
{'generated_token_ids': ANY(A_ )},
],
] , )
@require_tf
def a__ ( self : Optional[int] ) -> str:
"""simple docstring"""
lowerCamelCase_ = pipeline(task='text-generation' , model='sshleifer/tiny-ctrl' , framework='tf' )
# Using `do_sample=False` to force deterministic output
lowerCamelCase_ = text_generator('This is a test' , do_sample=A_ )
self.assertEqual(
A_ , [
{
'generated_text': (
'This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'
' please,'
)
}
] , )
lowerCamelCase_ = text_generator(['This is a test', 'This is a second test'] , do_sample=A_ )
self.assertEqual(
A_ , [
[
{
'generated_text': (
'This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'
' please,'
)
}
],
[
{
'generated_text': (
'This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'
' Cannes 閲閲Cannes Cannes Cannes 攵 please,'
)
}
],
] , )
def a__ ( self : Optional[int] , A_ : Dict , A_ : int , A_ : List[str] ) -> str:
"""simple docstring"""
lowerCamelCase_ = TextGenerationPipeline(model=A_ , tokenizer=A_ )
return text_generator, ["This is a test", "Another test"]
def a__ ( self : Dict ) -> str:
"""simple docstring"""
lowerCamelCase_ = 'Hello I believe in'
lowerCamelCase_ = pipeline('text-generation' , model='hf-internal-testing/tiny-random-gpt2' )
lowerCamelCase_ = text_generator(A_ )
self.assertEqual(
A_ , [{'generated_text': 'Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'}] , )
lowerCamelCase_ = text_generator(A_ , stop_sequence=' fe' )
self.assertEqual(A_ , [{'generated_text': 'Hello I believe in fe'}] )
    def run_pipeline_test(self, text_generator, _):
        """simple docstring"""
        model = text_generator.model
        tokenizer = text_generator.tokenizer
        outputs = text_generator('This is a test')
        self.assertEqual(outputs, [{'generated_text': ANY(str)}])
        self.assertTrue(outputs[0]['generated_text'].startswith('This is a test'))
        outputs = text_generator('This is a test', return_full_text=False)
        self.assertEqual(outputs, [{'generated_text': ANY(str)}])
        self.assertNotIn('This is a test', outputs[0]['generated_text'])
        text_generator = pipeline(task='text-generation', model=model, tokenizer=tokenizer, return_full_text=False)
        outputs = text_generator('This is a test')
        self.assertEqual(outputs, [{'generated_text': ANY(str)}])
        self.assertNotIn('This is a test', outputs[0]['generated_text'])
        outputs = text_generator('This is a test', return_full_text=True)
        self.assertEqual(outputs, [{'generated_text': ANY(str)}])
        self.assertTrue(outputs[0]['generated_text'].startswith('This is a test'))
lowerCamelCase_ = text_generator(['This is great !', 'Something else'] , num_return_sequences=2 , do_sample=A_ )
self.assertEqual(
A_ , [
[{'generated_text': ANY(A_ )}, {'generated_text': ANY(A_ )}],
[{'generated_text': ANY(A_ )}, {'generated_text': ANY(A_ )}],
] , )
if text_generator.tokenizer.pad_token is not None:
lowerCamelCase_ = text_generator(
['This is great !', 'Something else'] , num_return_sequences=2 , batch_size=2 , do_sample=A_ )
self.assertEqual(
A_ , [
[{'generated_text': ANY(A_ )}, {'generated_text': ANY(A_ )}],
[{'generated_text': ANY(A_ )}, {'generated_text': ANY(A_ )}],
] , )
with self.assertRaises(A_ ):
lowerCamelCase_ = text_generator('test' , return_full_text=A_ , return_text=A_ )
with self.assertRaises(A_ ):
lowerCamelCase_ = text_generator('test' , return_full_text=A_ , return_tensors=A_ )
with self.assertRaises(A_ ):
lowerCamelCase_ = text_generator('test' , return_text=A_ , return_tensors=A_ )
# Empty prompt is slighly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
lowerCamelCase_ = text_generator('' )
self.assertEqual(A_ , [{'generated_text': ANY(A_ )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
lowerCamelCase_ = text_generator('' )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
        EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = ['RwkvForCausalLM', 'XGLMForCausalLM', 'GPTNeoXForCausalLM']
if (
tokenizer.model_max_length < 10000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator('This is a test' * 500 , max_new_tokens=20 )
lowerCamelCase_ = text_generator('This is a test' * 500 , handle_long_generation='hole' , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(A_ ):
text_generator(
'This is a test' * 500 , handle_long_generation='hole' , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def a__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
import torch
# Classic `model_kwargs`
lowerCamelCase_ = pipeline(
            model='hf-internal-testing/tiny-random-bloom' , model_kwargs={'device_map': 'auto', 'torch_dtype': torch.bfloat16} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloat16 )
lowerCamelCase_ = pipe('This is a test' )
self.assertEqual(
A_ , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
        lowerCamelCase_ = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' , torch_dtype=torch.bfloat16 )
self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloat16 )
lowerCamelCase_ = pipe('This is a test' )
self.assertEqual(
A_ , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
lowerCamelCase_ = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' )
self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.float32 )
lowerCamelCase_ = pipe('This is a test' )
self.assertEqual(
A_ , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
@require_torch
@require_torch_gpu
def a__ ( self : int ) -> str:
"""simple docstring"""
import torch
        lowerCamelCase_ = pipeline(model='hf-internal-testing/tiny-random-bloom' , device=0 , torch_dtype=torch.float16 )
pipe('This is a test' )
@require_torch
@require_accelerate
@require_torch_gpu
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
import torch
        lowerCamelCase_ = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' , torch_dtype=torch.float16 )
pipe('This is a test' , do_sample=A_ , top_p=0.5 )
def a__ ( self : Tuple ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = 'Hello world'
lowerCamelCase_ = pipeline('text-generation' , model='hf-internal-testing/tiny-random-gpt2' )
if text_generator.model.framework == "tf":
lowerCamelCase_ = logging.get_logger('transformers.generation.tf_utils' )
else:
lowerCamelCase_ = logging.get_logger('transformers.generation.utils' )
        lowerCamelCase_ = 'Both `max_new_tokens`' # The beginning of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(A_ ) as cl:
lowerCamelCase_ = text_generator(A_ , max_length=10 , max_new_tokens=1 )
self.assertIn(A_ , cl.out )
# The user only sets one -> no warning
with CaptureLogger(A_ ) as cl:
lowerCamelCase_ = text_generator(A_ , max_new_tokens=1 )
self.assertNotIn(A_ , cl.out )
with CaptureLogger(A_ ) as cl:
lowerCamelCase_ = text_generator(A_ , max_length=10 )
self.assertNotIn(A_ , cl.out )
| 651
| 1
|
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
    classifier = models.Sequential()

    # Step 1 - Convolution
    # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
    # (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu")
    )

    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Step 3 - Flattening
    classifier.add(layers.Flatten())

    # Step 4 - Full connection
    classifier.add(layers.Dense(units=128, activation="relu"))
    classifier.add(layers.Dense(units=1, activation="sigmoid"))

    # Compiling the CNN
    classifier.compile(
        optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
    )

    # Part 2 - Fitting the CNN to the images
    # Load Trained model weights
    # from keras.models import load_model
    # regressor=load_model('cnn.h5')
    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )
    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
    training_set = train_datagen.flow_from_directory(
        "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )
    test_set = test_datagen.flow_from_directory(
        "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )
    # `fit` accepts Python generators directly in TF 2.x (fit_generator is deprecated)
    classifier.fit(
        training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
    )
    classifier.save("cnn.h5")

    # Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
        "dataset/single_prediction/image.png", target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    if result[0][0] == 0:
        prediction = "Normal"
    if result[0][0] == 1:
        prediction = "Abnormality detected"
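    # Illustrative follow-up (added): a sigmoid output is usually thresholded
    # at 0.5 rather than compared for exact equality with 0 or 1.
    label = "Abnormality detected" if result[0][0] > 0.5 else "Normal"
    print(label)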
| 651
|
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = " \"\"\"\n Output class for the scheduler's step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n"
class A( unittest.TestCase ):
'''simple docstring'''
def a__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir , 'schedulers/' ) )
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path , 'src/diffusers/schedulers/scheduling_ddpm.py' ) , os.path.join(self.diffusers_dir , 'schedulers/scheduling_ddpm.py' ) , )
    def a__ ( self : Tuple ) -> List[str]:
        """simple docstring"""
        check_copies.DIFFUSERS_PATH = 'src/diffusers'
        shutil.rmtree(self.diffusers_dir )
    def check_copy_consistency(self, comment: str, class_name: str, class_code: str, overwrite_result=None) -> None:
        """simple docstring"""
        code = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code
        if overwrite_result is not None:
            expected = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, 'new_code.py')
        with open(fname, 'w', newline='\n') as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, 'r') as f:
                self.assertTrue(f.read(), expected)
def a__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
        reference_code = check_copies.find_code_in_diffusers('schedulers.scheduling_ddpm.DDPMSchedulerOutput' )
        self.assertEqual(reference_code , REFERENCE_CODE )
def a__ ( self : Any ) -> Dict:
"""simple docstring"""
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , REFERENCE_CODE + '\n' , )
# With no empty line at the end
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , A_ , )
# Copy consistency with rename
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , re.sub('DDPM' , 'Test' , A_ ) , )
# Copy consistency with a really long name
lowerCamelCase_ = 'TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'
self.check_copy_consistency(
f"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , f"""{long_class_name}SchedulerOutput""" , re.sub('Bert' , A_ , A_ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , A_ , overwrite_result=re.sub('DDPM' , 'Test' , A_ ) , )
| 651
| 1
|
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCamelCase : str = logging.get_logger(__name__)
lowerCamelCase : Optional[Any] = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
lowerCamelCase : int = {
"vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
"merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
"tokenizer_config_file": {
"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
},
}
lowerCamelCase : Tuple = {"facebook/blenderbot-3B": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = (
list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
)
lowerCamelCase_ = bs[:]
lowerCamelCase_ = 0
for b in range(2**8 ):
if b not in bs:
bs.append(lowercase )
cs.append(2**8 + n )
n += 1
lowerCamelCase_ = [chr(lowercase ) for n in cs]
return dict(zip(lowercase , lowercase ) )
def _SCREAMING_SNAKE_CASE ( lowercase : int ):
'''simple docstring'''
lowerCamelCase_ = set()
lowerCamelCase_ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowerCamelCase_ = char
return pairs
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self : Optional[Any] , A_ : List[Any] , A_ : List[Any] , A_ : Union[str, Any]="replace" , A_ : Dict="<s>" , A_ : Optional[int]="</s>" , A_ : Optional[Any]="</s>" , A_ : Dict="<s>" , A_ : Dict="<unk>" , A_ : Any="<pad>" , A_ : Dict="<mask>" , A_ : Union[str, Any]=False , **A_ : List[str] , ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else bos_token
lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else eos_token
lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else sep_token
lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else cls_token
lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else unk_token
lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token
super().__init__(
errors=A_ , bos_token=A_ , eos_token=A_ , unk_token=A_ , sep_token=A_ , cls_token=A_ , pad_token=A_ , mask_token=A_ , add_prefix_space=A_ , **A_ , )
with open(A_ , encoding='utf-8' ) as vocab_handle:
lowerCamelCase_ = json.load(A_ )
lowerCamelCase_ = {v: k for k, v in self.encoder.items()}
lowerCamelCase_ = errors # how to handle errors in decoding
lowerCamelCase_ = bytes_to_unicode()
lowerCamelCase_ = {v: k for k, v in self.byte_encoder.items()}
with open(A_ , encoding='utf-8' ) as merges_handle:
lowerCamelCase_ = merges_handle.read().split('\n' )[1:-1]
lowerCamelCase_ = [tuple(merge.split() ) for merge in bpe_merges]
lowerCamelCase_ = dict(zip(A_ , range(len(A_ ) ) ) )
lowerCamelCase_ = {}
lowerCamelCase_ = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowerCamelCase_ = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def a__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
return len(self.encoder )
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def a__ ( self : Tuple , A_ : Tuple ) -> Optional[Any]:
"""simple docstring"""
if token in self.cache:
return self.cache[token]
lowerCamelCase_ = tuple(A_ )
lowerCamelCase_ = get_pairs(A_ )
if not pairs:
return token
while True:
lowerCamelCase_ = min(A_ , key=lambda A_ : self.bpe_ranks.get(A_ , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
lowerCamelCase_ , lowerCamelCase_ = bigram
lowerCamelCase_ = []
lowerCamelCase_ = 0
while i < len(A_ ):
try:
lowerCamelCase_ = word.index(A_ , A_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCamelCase_ = j
if word[i] == first and i < len(A_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCamelCase_ = tuple(A_ )
lowerCamelCase_ = new_word
if len(A_ ) == 1:
break
else:
lowerCamelCase_ = get_pairs(A_ )
lowerCamelCase_ = ' '.join(A_ )
lowerCamelCase_ = word
return word
def a__ ( self : str , A_ : List[str] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = []
for token in re.findall(self.pat , A_ ):
lowerCamelCase_ = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(A_ ).split(' ' ) )
return bpe_tokens
def a__ ( self : Tuple , A_ : str ) -> Optional[Any]:
"""simple docstring"""
return self.encoder.get(A_ , self.encoder.get(self.unk_token ) )
def a__ ( self : Tuple , A_ : Dict ) -> List[Any]:
"""simple docstring"""
return self.decoder.get(A_ )
def a__ ( self : Optional[int] , A_ : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = ''.join(A_ )
lowerCamelCase_ = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
return text
def a__ ( self : Tuple , A_ : str , A_ : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(A_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCamelCase_ = os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
lowerCamelCase_ = os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(A_ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=A_ , ensure_ascii=A_ ) + '\n' )
lowerCamelCase_ = 0
with open(A_ , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda A_ : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
' Please check that the tokenizer is not corrupted!' )
lowerCamelCase_ = token_index
writer.write(' '.join(A_ ) + '\n' )
index += 1
return vocab_file, merge_file
def a__ ( self : str , A_ : List[int] , A_ : Optional[List[int]] = None , A_ : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ )
if token_ids_a is None:
return [1] + ([0] * len(A_ )) + [1]
return [1] + ([0] * len(A_ )) + [1, 1] + ([0] * len(A_ )) + [1]
def a__ ( self : int , A_ : List[int] , A_ : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def a__ ( self : str , A_ : Optional[Any] , A_ : Union[str, Any]=False , **A_ : List[str] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = kwargs.pop('add_prefix_space' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(A_ ) > 0 and not text[0].isspace()):
lowerCamelCase_ = ' ' + text
return (text, kwargs)
def a__ ( self : List[Any] , A_ : List[int] , A_ : Optional[List[int]] = None ) -> Dict:
"""simple docstring"""
return token_ids_a + [self.eos_token_id]
def a__ ( self : Optional[int] , A_ : "Conversation" ) -> List[int]:
"""simple docstring"""
lowerCamelCase_ = []
for is_user, text in conversation.iter_texts():
if is_user:
# Prefix with a space, matching what Blenderbot does internally
inputs.append(' ' + text )
else:
# Generated responses already contain the leading space.
inputs.append(A_ )
lowerCamelCase_ = ' '.join(A_ )
lowerCamelCase_ = self.encode(A_ )
if len(A_ ) > self.model_max_length:
lowerCamelCase_ = input_ids[-self.model_max_length :]
logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" )
return input_ids
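# Illustrative sketch: the byte-level BPE above first maps every byte 0..255 to
# a printable unicode character so the merge table never has to handle raw
# control bytes. This standalone re-implementation of that table (independent
# of the tokenizer class) demonstrates the round trip:
def _tiny_bytes_to_unicode() -> dict:
    bs = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)  # shift unprintable bytes into unused code points
            n += 1
    return dict(zip(bs, [chr(c) for c in cs]))

_byte_encoder = _tiny_bytes_to_unicode()
_byte_decoder = {v: k for k, v in _byte_encoder.items()}
_encoded = "".join(_byte_encoder[b] for b in "héllo".encode("utf-8"))
assert bytearray(_byte_decoder[c] for c in _encoded).decode("utf-8") == "héllo"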
| 651
|
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class A( unittest.TestCase ):
'''simple docstring'''
def a__ ( self : Optional[int] , A_ : Tuple , A_ : str , A_ : int ) -> Any:
"""simple docstring"""
self.assertEqual(len(A_ ) , len(A_ ) )
for a, b in zip(A_ , A_ ):
self.assertAlmostEqual(A_ , A_ , delta=A_ )
def a__ ( self : int ) -> str:
"""simple docstring"""
lowerCamelCase_ = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0] )] )
accumulator([tf.constant([-2.0, 1.0] )] )
accumulator([tf.constant([-1.0, 2.0] )] )
with self.assertRaises(A_ ):
accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
self.assertEqual(accumulator.step , 3 )
self.assertEqual(len(accumulator.gradients ) , 1 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1E-2 )
def a__ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = None
ops.enable_eager_execution_internal()
lowerCamelCase_ = tf.config.list_physical_devices('CPU' )
if len(A_ ) == 1:
tf.config.set_logical_device_configuration(
physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
lowerCamelCase_ = tf.config.list_logical_devices(device_type='CPU' )
lowerCamelCase_ = tf.distribute.MirroredStrategy(devices=devices[:2] )
with strategy.scope():
lowerCamelCase_ = GradientAccumulator()
lowerCamelCase_ = tf.Variable([4.0, 3.0] )
lowerCamelCase_ , lowerCamelCase_ = create_optimizer(5E-5 , 10 , 5 )
lowerCamelCase_ = tf.Variable([0.0, 0.0] , trainable=A_ )
def accumulate_on_replica(A_ : Any ):
accumulator([gradient] )
def apply_on_replica():
optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )
@tf.function
def accumulate(A_ : List[Any] , A_ : Tuple ):
with strategy.scope():
lowerCamelCase_ = strategy.experimental_local_results(A_ )
local_variables[0].assign(A_ )
local_variables[1].assign(A_ )
strategy.run(A_ , args=(gradient_placeholder,) )
@tf.function
def apply_grad():
with strategy.scope():
strategy.run(A_ )
def _check_local_values(A_ : List[Any] , A_ : str ):
lowerCamelCase_ = strategy.experimental_local_results(accumulator._gradients[0] )
self.assertListAlmostEqual(values[0].value() , A_ , tol=1E-2 )
self.assertListAlmostEqual(values[1].value() , A_ , tol=1E-2 )
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] )
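# Illustrative sketch: a framework-free accumulator with the same observable
# contract the tests above exercise (per-step accumulation, a step counter,
# summed gradients, reset). This is a plain-Python stand-in, not the
# transformers GradientAccumulator implementation.
class _TinyAccumulator:
    def __init__(self):
        self.step = 0
        self._grads = None

    def __call__(self, gradients):
        if self._grads is None:
            self._grads = [list(g) for g in gradients]
        elif len(gradients) != len(self._grads):
            raise ValueError("Expected the same number of gradients on every step")
        else:
            for acc, grad in zip(self._grads, gradients):
                for i, value in enumerate(grad):
                    acc[i] += value
        self.step += 1

    @property
    def gradients(self):
        return self._grads

    def reset(self):
        self.step = 0
        self._grads = [[0.0] * len(g) for g in self._grads]

_acc = _TinyAccumulator()
_acc([[1.0, 2.0]])
_acc([[-2.0, 1.0]])
_acc([[-1.0, 2.0]])
assert _acc.step == 3 and _acc.gradients == [[-2.0, 5.0]]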
| 651
| 1
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 651
|
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
lowerCamelCase : str = imread(r"digital_image_processing/image_data/lena_small.jpg")
lowerCamelCase : Optional[Any] = cvtColor(img, COLOR_BGR2GRAY)
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = cn.convert_to_negative(lowercase )
# assert negative_img array for at least one True
assert negative_img.any()
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
with Image.open('digital_image_processing/image_data/lena_small.jpg' ) as img:
# Work around assertion for response
assert str(cc.change_contrast(lowercase , 1_10 ) ).startswith(
'<PIL.Image.Image image mode=RGB size=100x100 at' )
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = imread('digital_image_processing/image_data/lena_small.jpg' , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
lowerCamelCase_ = canny.canny(lowercase )
# assert canny array for at least one True
assert canny_array.any()
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
assert gg.gaussian_filter(lowercase , 5 , sigma=0.9 ).all()
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
lowerCamelCase_ = conv.img_convolve(lowercase , lowercase ).astype(lowercase )
assert res.any()
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
assert med.median_filter(lowercase , 3 ).any()
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ , lowerCamelCase_ = sob.sobel_filter(lowercase )
assert grad.any() and theta.any()
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = sp.make_sepia(lowercase , 20 )
assert sepia.all()
def _SCREAMING_SNAKE_CASE ( lowercase : str = "digital_image_processing/image_data/lena_small.jpg" ):
'''simple docstring'''
lowerCamelCase_ = bs.Burkes(imread(lowercase , 1 ) , 1_20 )
burkes.process()
assert burkes.output_img.any()
def _SCREAMING_SNAKE_CASE ( lowercase : str = "digital_image_processing/image_data/lena_small.jpg" , ):
'''simple docstring'''
lowerCamelCase_ = rs.NearestNeighbour(imread(lowercase , 1 ) , 4_00 , 2_00 )
nn.process()
assert nn.output.any()
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = 'digital_image_processing/image_data/lena.jpg'
# Reading the image and converting it to grayscale.
lowerCamelCase_ = imread(lowercase , 0 )
# Test for get_neighbors_pixel function() return not None
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = image[x_coordinate][y_coordinate]
lowerCamelCase_ = lbp.get_neighbors_pixel(
lowercase , lowercase , lowercase , lowercase )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
lowerCamelCase_ = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
lowerCamelCase_ = lbp.local_binary_value(lowercase , lowercase , lowercase )
assert lbp_image.any()
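# Illustrative sketch: the local-binary-pattern test above thresholds each
# pixel's eight neighbours against the centre and packs the results into one
# byte. A minimal standalone version for a single pixel (the bit ordering here
# is one common choice; the package's implementation may order bits differently):
import numpy as np

def _lbp_value(image, x: int, y: int) -> int:
    center = image[x, y]
    # Neighbour offsets, clockwise from the top-left corner.
    offsets = [(-1, -1), (-1, 0), (-1, 1), (0, 1), (1, 1), (1, 0), (1, -1), (0, -1)]
    value = 0
    for bit, (dx, dy) in enumerate(offsets):
        if image[x + dx, y + dy] >= center:
            value |= 1 << bit
    return value

_img = np.array([[10, 20, 30], [40, 50, 60], [70, 80, 90]], dtype=np.uint8)
assert _lbp_value(_img, 1, 1) == 120  # 60, 90, 80 and 70 are >= the centre 50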
| 651
| 1
|
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class A( UpperCamelCase ):
'''simple docstring'''
def __init__( self : Any , A_ : Union[str, Any] , A_ : Optional[Any] ) -> Any:
"""simple docstring"""
super().__init__()
# make sure scheduler can always be converted to DDIM
lowerCamelCase_ = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=A_ , scheduler=A_ )
@torch.no_grad()
def __call__( self : Optional[Any] , A_ : int = 1 , A_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , A_ : float = 0.0 , A_ : int = 50 , A_ : Optional[bool] = None , A_ : Optional[str] = "pil" , A_ : bool = True , ) -> Union[ImagePipelineOutput, Tuple]:
"""simple docstring"""
if isinstance(self.unet.config.sample_size , A_ ):
lowerCamelCase_ = (
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size,
self.unet.config.sample_size,
)
else:
lowerCamelCase_ = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
if isinstance(A_ , A_ ) and len(A_ ) != batch_size:
raise ValueError(
f"""You have passed a list of generators of length {len(A_ )}, but requested an effective batch"""
f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
lowerCamelCase_ = randn_tensor(A_ , generator=A_ , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(A_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
lowerCamelCase_ = self.unet(A_ , A_ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
lowerCamelCase_ = self.scheduler.step(
A_ , A_ , A_ , eta=A_ , use_clipped_model_output=A_ , generator=A_ ).prev_sample
lowerCamelCase_ = (image / 2 + 0.5).clamp(0 , 1 )
lowerCamelCase_ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowerCamelCase_ = self.numpy_to_pil(A_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=A_ )
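# Usage sketch for the pipeline above, assuming it corresponds to diffusers'
# DDIMPipeline (commented out: it downloads a checkpoint; the model name
# follows the diffusers documentation and is an assumption, not a requirement
# of this module):
#
#   from diffusers import DDIMPipeline
#   import torch
#
#   pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#   generator = torch.Generator().manual_seed(0)
#   image = pipe(batch_size=1, generator=generator, num_inference_steps=50, eta=0.0).images[0]
#   image.save("ddim_sample.png")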
| 651
|
class A:
'''simple docstring'''
def __init__( self : Dict ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = {}
def a__ ( self : Union[str, Any] , A_ : List[Any] ) -> int:
"""simple docstring"""
if vertex not in self.adjacency:
lowerCamelCase_ = {}
self.num_vertices += 1
def a__ ( self : int , A_ : int , A_ : Optional[Any] , A_ : List[str] ) -> Tuple:
"""simple docstring"""
self.add_vertex(A_ )
self.add_vertex(A_ )
if head == tail:
return
lowerCamelCase_ = weight
lowerCamelCase_ = weight
def a__ ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = self.get_edges()
for edge in edges:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = edge
edges.remove((tail, head, weight) )
for i in range(len(A_ ) ):
lowerCamelCase_ = list(edges[i] )
edges.sort(key=lambda A_ : e[2] )
for i in range(len(A_ ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
lowerCamelCase_ = edges[i][2] + 1
for edge in edges:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = edge
lowerCamelCase_ = weight
lowerCamelCase_ = weight
def __str__( self : str ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = ''
for tail in self.adjacency:
for head in self.adjacency[tail]:
lowerCamelCase_ = self.adjacency[head][tail]
string += f"""{head} -> {tail} == {weight}\n"""
return string.rstrip('\n' )
def a__ ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def a__ ( self : List[str] ) -> int:
"""simple docstring"""
return self.adjacency.keys()
@staticmethod
def a__ ( A_ : Optional[Any]=None , A_ : List[str]=None ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = Graph()
if vertices is None:
lowerCamelCase_ = []
if edges is None:
lowerCamelCase_ = []
for vertex in vertices:
g.add_vertex(A_ )
for edge in edges:
g.add_edge(*A_ )
return g
class A:
'''simple docstring'''
def __init__( self : Optional[int] ) -> int:
"""simple docstring"""
lowerCamelCase_ = {}
lowerCamelCase_ = {}
def __len__( self : Any ) -> List[str]:
"""simple docstring"""
return len(self.parent )
def a__ ( self : List[str] , A_ : Any ) -> Dict:
"""simple docstring"""
if item in self.parent:
return self.find(A_ )
lowerCamelCase_ = item
lowerCamelCase_ = 0
return item
def a__ ( self : List[str] , A_ : Tuple ) -> Optional[int]:
"""simple docstring"""
if item not in self.parent:
return self.make_set(A_ )
if item != self.parent[item]:
lowerCamelCase_ = self.find(self.parent[item] )
return self.parent[item]
def a__ ( self : Any , A_ : int , A_ : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = self.find(A_ )
lowerCamelCase_ = self.find(A_ )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
lowerCamelCase_ = roota
return roota
if self.rank[roota] < self.rank[roota]:
lowerCamelCase_ = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
lowerCamelCase_ = roota
return roota
return None
@staticmethod
def a__ ( A_ : int ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = graph.num_vertices
lowerCamelCase_ = Graph.UnionFind()
lowerCamelCase_ = []
while num_components > 1:
lowerCamelCase_ = {}
for vertex in graph.get_vertices():
lowerCamelCase_ = -1
lowerCamelCase_ = graph.get_edges()
for edge in edges:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = edge
edges.remove((tail, head, weight) )
for edge in edges:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = edge
lowerCamelCase_ = union_find.find(A_ )
lowerCamelCase_ = union_find.find(A_ )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
lowerCamelCase_ = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
lowerCamelCase_ = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = cheap_edge[vertex]
if union_find.find(A_ ) != union_find.find(A_ ):
union_find.union(A_ , A_ )
mst_edges.append(cheap_edge[vertex] )
lowerCamelCase_ = num_components - 1
lowerCamelCase_ = Graph.build(edges=A_ )
return mst
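# Illustrative sketch: the UnionFind above is the standard disjoint-set
# structure with path compression in find and union by rank. A standalone
# version with disambiguated names behaves like this:
class _MiniUnionFind:
    def __init__(self):
        self.parent = {}
        self.rank = {}

    def find(self, item):
        if item not in self.parent:
            self.parent[item] = item
            self.rank[item] = 0
        if self.parent[item] != item:
            self.parent[item] = self.find(self.parent[item])  # path compression
        return self.parent[item]

    def union(self, a, b):
        root_a, root_b = self.find(a), self.find(b)
        if root_a == root_b:
            return
        if self.rank[root_a] < self.rank[root_b]:
            root_a, root_b = root_b, root_a
        self.parent[root_b] = root_a  # attach the shallower tree under the deeper one
        if self.rank[root_a] == self.rank[root_b]:
            self.rank[root_a] += 1

_uf = _MiniUnionFind()
_uf.union(1, 2)
_uf.union(3, 4)
assert _uf.find(1) == _uf.find(2) and _uf.find(1) != _uf.find(3)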
| 651
| 1
|
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = '''new-model'''
if is_tf_available():
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = NewModelConfig
@require_tf
class A( unittest.TestCase ):
'''simple docstring'''
@slow
def a__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = 'bert-base-cased'
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = 'bert-base-cased'
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForPreTraining.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForCausalLM.from_pretrained(A_ )
lowerCamelCase_ , lowerCamelCase_ = TFAutoModelForCausalLM.from_pretrained(A_ , output_loading_info=A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelWithLMHead.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : int ) -> str:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForMaskedLM.from_pretrained(A_ )
lowerCamelCase_ , lowerCamelCase_ = TFAutoModelForMaskedLM.from_pretrained(A_ , output_loading_info=A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : Any ) -> List[Any]:
"""simple docstring"""
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained(A_ )
lowerCamelCase_ , lowerCamelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained(A_ , output_loading_info=A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForSequenceClassification.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : List[Any] ) -> Any:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForQuestionAnswering.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
@require_tensorflow_probability
def a__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForTableQuestionAnswering.from_pretrained(A_ )
lowerCamelCase_ , lowerCamelCase_ = TFAutoModelForTableQuestionAnswering.from_pretrained(
A_ , output_loading_info=A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
def a__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = TFAutoModelWithLMHead.from_pretrained(A_ )
self.assertIsInstance(A_ , A_ )
self.assertEqual(model.num_parameters() , 14410 )
self.assertEqual(model.num_parameters(only_trainable=A_ ) , 14410 )
def a__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = TFAutoModelWithLMHead.from_pretrained(A_ )
self.assertIsInstance(A_ , A_ )
self.assertEqual(model.num_parameters() , 14410 )
self.assertEqual(model.num_parameters(only_trainable=A_ ) , 14410 )
def a__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ = TFAutoModel.from_pretrained('sgugger/funnel-random-tiny' )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = copy.deepcopy(model.config )
lowerCamelCase_ = ['FunnelBaseModel']
lowerCamelCase_ = TFAutoModel.from_config(A_ )
self.assertIsInstance(A_ , A_ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(A_ )
lowerCamelCase_ = TFAutoModel.from_pretrained(A_ )
self.assertIsInstance(A_ , A_ )
def a__ ( self : Any ) -> Tuple:
"""simple docstring"""
try:
AutoConfig.register('new-model' , A_ )
lowerCamelCase_ = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(A_ ):
auto_class.register(A_ , A_ )
auto_class.register(A_ , A_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(A_ ):
auto_class.register(A_ , A_ )
# Now that the config is registered, it can be used as any other config with the auto-API
lowerCamelCase_ = BertModelTester(self ).get_config()
lowerCamelCase_ = NewModelConfig(**tiny_config.to_dict() )
lowerCamelCase_ = auto_class.from_config(A_ )
self.assertIsInstance(A_ , A_ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(A_ )
lowerCamelCase_ = auto_class.from_pretrained(A_ )
self.assertIsInstance(A_ , A_ )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def a__ ( self : int ) -> int:
"""simple docstring"""
with self.assertRaisesRegex(
A_ , 'bert-base is not a local folder and is not a valid model identifier' ):
lowerCamelCase_ = TFAutoModel.from_pretrained('bert-base' )
def a__ ( self : Any ) -> Dict:
"""simple docstring"""
with self.assertRaisesRegex(
A_ , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
lowerCamelCase_ = TFAutoModel.from_pretrained(A_ , revision='aaaaaa' )
def a__ ( self : str ) -> int:
"""simple docstring"""
with self.assertRaisesRegex(
A_ , 'hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin' , ):
lowerCamelCase_ = TFAutoModel.from_pretrained('hf-internal-testing/config-no-model' )
def a__ ( self : Any ) -> List[Any]:
"""simple docstring"""
with self.assertRaisesRegex(A_ , 'Use `from_pt=True` to load this model' ):
lowerCamelCase_ = TFAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only' )
def a__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' )
with RequestCounter() as counter:
lowerCamelCase_ = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
lowerCamelCase_ = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' )
with RequestCounter() as counter:
lowerCamelCase_ = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
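# Usage sketch of the register flow exercised above (commented out: it requires
# TensorFlow, and TFNewModel stands in for a hypothetical user-defined model
# class paired with NewModelConfig):
#
#   AutoConfig.register("new-model", NewModelConfig)
#   TFAutoModel.register(NewModelConfig, TFNewModel)
#   model = TFAutoModel.from_config(NewModelConfig())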
| 651
|
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = 0
for i in range(1 , 10_01 ):
total += i**i
return str(lowercase )[-10:]
if __name__ == "__main__":
print(solution())
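# Illustrative note: only the last ten digits are needed, so the sum can be
# taken with three-argument pow instead of materialising the ~3000-digit
# integers; both formulations agree.
def _solution_mod() -> str:
    modulus = 10**10
    return str(sum(pow(i, i, modulus) for i in range(1, 1001)) % modulus).zfill(10)

assert _solution_mod() == str(sum(i**i for i in range(1, 1001)))[-10:]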
| 651
| 1
|
import cva
import numpy as np
class A:
'''simple docstring'''
def __init__( self : int , A_ : float , A_ : int ) -> List[Any]:
"""simple docstring"""
if k in (0.04, 0.06):
lowerCamelCase_ = k
lowerCamelCase_ = window_size
else:
raise ValueError('invalid k value' )
def __str__( self : str ) -> str:
"""simple docstring"""
return str(self.k )
def a__ ( self : Any , A_ : str ) -> tuple[cva.Mat, list[list[int]]]:
"""simple docstring"""
lowerCamelCase_ = cva.imread(A_ , 0 )
lowerCamelCase_ , lowerCamelCase_ = img.shape
lowerCamelCase_ = []
lowerCamelCase_ = img.copy()
lowerCamelCase_ = cva.cvtColor(A_ , cva.COLOR_GRAY2RGB )
lowerCamelCase_ , lowerCamelCase_ = np.gradient(A_ )
lowerCamelCase_ = dx**2
lowerCamelCase_ = dy**2
lowerCamelCase_ = dx * dy
lowerCamelCase_ = self.k # honour the k configured in __init__ instead of re-hardcoding 0.04
lowerCamelCase_ = self.window_size // 2
for y in range(A_ , h - offset ):
for x in range(A_ , w - offset ):
lowerCamelCase_ = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowerCamelCase_ = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowerCamelCase_ = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowerCamelCase_ = (wxx * wyy) - (wxy**2)
lowerCamelCase_ = wxx + wyy
lowerCamelCase_ = det - k * (trace**2)
# Response threshold; 0.5 is arbitrary and can be tuned per image
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 255 )
return color_img, corner_list
if __name__ == "__main__":
lowerCamelCase : Optional[int] = HarrisCorner(0.04, 3)
lowerCamelCase , lowerCamelCase : Optional[int] = edge_detect.detect("path_to_image")
cva.imwrite("detect.png", color_img)
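# Illustrative sketch: the per-pixel loop above evaluates the Harris response
# R = det(M) - k * trace(M)**2 for the 2x2 structure tensor
# M = [[Sxx, Sxy], [Sxy, Syy]] accumulated over the window. The same response
# for a single window, written directly:
import numpy as np

def _harris_response(window_dx, window_dy, k: float = 0.04) -> float:
    sxx = float((window_dx**2).sum())
    syy = float((window_dy**2).sum())
    sxy = float((window_dx * window_dy).sum())
    det = sxx * syy - sxy**2
    trace = sxx + syy
    return det - k * trace**2

_rng = np.random.default_rng(0)
assert isinstance(_harris_response(_rng.standard_normal((3, 3)), _rng.standard_normal((3, 3))), float)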
| 651
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase : Dict = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Optional[int] = ["ViTFeatureExtractor"]
lowerCamelCase : Dict = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : int = [
"VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTForImageClassification",
"ViTForMaskedImageModeling",
"ViTModel",
"ViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Tuple = [
"TFViTForImageClassification",
"TFViTModel",
"TFViTPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Dict = [
"FlaxViTForImageClassification",
"FlaxViTModel",
"FlaxViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
lowerCamelCase : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
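# Illustrative sketch: _LazyModule defers the heavy imports declared above
# until an attribute is first accessed, keeping import of the package cheap.
# A stripped-down standalone version of the idea, demonstrated on stdlib
# modules rather than the real import structure:
import importlib
import types

class _TinyLazyModule(types.ModuleType):
    """Resolve attributes to objects in other modules only on first access."""

    def __init__(self, name, attr_to_module):
        super().__init__(name)
        self._attr_to_module = attr_to_module

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        value = getattr(importlib.import_module(self._attr_to_module[attr]), attr)
        setattr(self, attr, value)  # cache so __getattr__ fires only once per name
        return value

_lazy = _TinyLazyModule("lazy_demo", {"sqrt": "math", "dumps": "json"})
assert _lazy.sqrt(9) == 3.0 and _lazy.dumps({"a": 1}) == '{"a": 1}'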
| 651
| 1
|
lowerCamelCase : List[str] = [0, 2, 4, 6, 8]
lowerCamelCase : List[str] = [1, 3, 5, 7, 9]
def _SCREAMING_SNAKE_CASE ( lowercase : int , lowercase : int , lowercase : list[int] , lowercase : int ):
'''simple docstring'''
if remaining_length == 0:
if digits[0] == 0 or digits[-1] == 0:
return 0
for i in range(length // 2 - 1 , -1 , -1 ):
remainder += digits[i] + digits[length - i - 1]
if remainder % 2 == 0:
return 0
remainder //= 10
return 1
if remaining_length == 1:
if remainder % 2 == 0:
return 0
lowerCamelCase_ = 0
for digit in range(10 ):
lowerCamelCase_ = digit
result += reversible_numbers(
0 , (remainder + 2 * digit) // 10 , lowercase , lowercase )
return result
lowerCamelCase_ = 0
for digita in range(10 ):
lowerCamelCase_ = digita
if (remainder + digita) % 2 == 0:
lowerCamelCase_ = ODD_DIGITS
else:
lowerCamelCase_ = EVEN_DIGITS
for digita in other_parity_digits:
lowerCamelCase_ = digita
result += reversible_numbers(
remaining_length - 2 , (remainder + digita + digita) // 10 , lowercase , lowercase , )
return result
def _SCREAMING_SNAKE_CASE ( lowercase : int = 9 ):
'''simple docstring'''
lowerCamelCase_ = 0
for length in range(1 , max_power + 1 ):
result += reversible_numbers(lowercase , 0 , [0] * length , lowercase )
return result
if __name__ == "__main__":
print(F"""{solution() = }""")
| 651
|
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
lowerCamelCase : int = datasets.logging.get_logger(__name__)
lowerCamelCase : Optional[Any] = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n"
lowerCamelCase : Tuple = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n"
lowerCamelCase : Optional[Any] = "\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting 'keep_singletons=False', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n 'mentions': mentions\n 'muc': MUC metric [Vilain et al, 1995]\n 'bcub': B-cubed [Bagga and Baldwin, 1998]\n 'ceafe': CEAFe [Luo et al., 2005]\n 'lea': LEA [Moosavi and Strube, 2016]\n 'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric('coval')\n >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',\n ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',\n ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',\n ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',\n ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',\n ... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n"
def _SCREAMING_SNAKE_CASE ( lowercase : str , lowercase : Optional[int] , lowercase : Any=False , lowercase : Any=False , lowercase : Dict=True , lowercase : List[str]=False , lowercase : int="dummy_doc" ):
'''simple docstring'''
lowerCamelCase_ = {doc: key_lines}
lowerCamelCase_ = {doc: sys_lines}
lowerCamelCase_ = {}
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ , lowerCamelCase_ = reader.get_doc_mentions(lowercase , key_doc_lines[doc] , lowercase )
key_singletons_num += singletons_num
if NP_only or min_span:
lowerCamelCase_ = reader.set_annotated_parse_trees(lowercase , key_doc_lines[doc] , lowercase , lowercase )
lowerCamelCase_ , lowerCamelCase_ = reader.get_doc_mentions(lowercase , sys_doc_lines[doc] , lowercase )
sys_singletons_num += singletons_num
if NP_only or min_span:
lowerCamelCase_ = reader.set_annotated_parse_trees(lowercase , key_doc_lines[doc] , lowercase , lowercase )
if remove_nested:
lowerCamelCase_ , lowerCamelCase_ = reader.remove_nested_coref_mentions(lowercase , lowercase )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
lowerCamelCase_ , lowerCamelCase_ = reader.remove_nested_coref_mentions(lowercase , lowercase )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
lowerCamelCase_ = reader.get_mention_assignments(lowercase , lowercase )
lowerCamelCase_ = reader.get_mention_assignments(lowercase , lowercase )
lowerCamelCase_ = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
'Number of removed nested coreferring mentions in the key '
f"""annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}""" )
logger.info(
'Number of resulting singleton clusters in the key '
f"""annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}""" )
if not keep_singletons:
logger.info(
f"""{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system """
'files, respectively' )
return doc_coref_infos
def _SCREAMING_SNAKE_CASE ( lowercase : Optional[Any] , lowercase : Tuple , lowercase : List[str] , lowercase : List[Any] , lowercase : List[Any] , lowercase : Tuple , lowercase : str ):
'''simple docstring'''
lowerCamelCase_ = get_coref_infos(lowercase , lowercase , lowercase , lowercase , lowercase , lowercase )
lowerCamelCase_ = {}
lowerCamelCase_ = 0
lowerCamelCase_ = 0
for name, metric in metrics:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = evaluator.evaluate_documents(lowercase , lowercase , beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({f"""{name}/recall""": recall, f"""{name}/precision""": precision, f"""{name}/f1""": fa} )
logger.info(
name.ljust(10 ) , f"""Recall: {recall * 1_00:.2f}""" , f""" Precision: {precision * 1_00:.2f}""" , f""" F1: {fa * 1_00:.2f}""" , )
if conll_subparts_num == 3:
lowerCamelCase_ = (conll / 3) * 1_00
logger.info(f"""CoNLL score: {conll:.2f}""" )
output_scores.update({'conll_score': conll} )
return output_scores
def _SCREAMING_SNAKE_CASE ( lowercase : Union[str, Any] ):
'''simple docstring'''
lowerCamelCase_ = False
for line in key_lines:
if not line.startswith('#' ):
if len(line.split() ) > 6:
lowerCamelCase_ = line.split()[5]
if not parse_col == "-":
lowerCamelCase_ = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A( datasets.Metric ):
'''simple docstring'''
def a__ ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' ) ),
'references': datasets.Sequence(datasets.Value('string' ) ),
} ) , codebase_urls=['https://github.com/ns-moosavi/coval'] , reference_urls=[
'https://github.com/ns-moosavi/coval',
'https://www.aclweb.org/anthology/P16-1060',
'http://www.conll.cemantix.org/2012/data.html',
] , )
def a__ ( self : List[str] , A_ : Optional[Any] , A_ : Optional[int] , A_ : int=True , A_ : str=False , A_ : int=False , A_ : Union[str, Any]=False ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = [
('mentions', evaluator.mentions),
('muc', evaluator.muc),
('bcub', evaluator.b_cubed),
('ceafe', evaluator.ceafe),
('lea', evaluator.lea),
]
if min_span:
lowerCamelCase_ = util.check_gold_parse_annotation(A_ )
if not has_gold_parse:
raise NotImplementedError('References should have gold parse annotation to use \'min_span\'.' )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
lowerCamelCase_ = evaluate(
key_lines=A_ , sys_lines=A_ , metrics=A_ , NP_only=A_ , remove_nested=A_ , keep_singletons=A_ , min_span=A_ , )
return score
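# Illustrative note: the 'conll_score' reported above is the arithmetic mean of
# the MUC, B-cubed and CEAFe F1 values, scaled to percent.
def _conll_average(f1_muc: float, f1_bcub: float, f1_ceafe: float) -> float:
    return (f1_muc + f1_bcub + f1_ceafe) / 3 * 100

assert _conll_average(1.0, 1.0, 1.0) == 100.0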
| 651
| 1
|
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
lowerCamelCase : str = logging.get_logger(__name__) # pylint: disable=invalid-name
def _SCREAMING_SNAKE_CASE ( lowercase : Union[List, PIL.Image.Image, torch.Tensor] ):
'''simple docstring'''
warnings.warn(
'The preprocess method is deprecated and will be removed in a future version. Please'
' use VaeImageProcessor.preprocess instead' , lowercase , )
if isinstance(lowercase , torch.Tensor ):
return image
elif isinstance(lowercase , PIL.Image.Image ):
lowerCamelCase_ = [image]
if isinstance(image[0] , PIL.Image.Image ):
lowerCamelCase_ , lowerCamelCase_ = image[0].size
lowerCamelCase_ , lowerCamelCase_ = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
lowerCamelCase_ = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image]
lowerCamelCase_ = np.concatenate(lowercase , axis=0 )
lowerCamelCase_ = np.array(lowercase ).astype(np.floataa ) / 255.0
lowerCamelCase_ = image.transpose(0 , 3 , 1 , 2 )
lowerCamelCase_ = 2.0 * image - 1.0
lowerCamelCase_ = torch.from_numpy(lowercase )
elif isinstance(image[0] , torch.Tensor ):
lowerCamelCase_ = torch.cat(lowercase , dim=0 )
return image
def _SCREAMING_SNAKE_CASE ( lowercase : Union[List, PIL.Image.Image, torch.Tensor] ):
'''simple docstring'''
if isinstance(lowercase , torch.Tensor ):
return mask
elif isinstance(lowercase , PIL.Image.Image ):
lowerCamelCase_ = [mask]
if isinstance(mask[0] , PIL.Image.Image ):
lowerCamelCase_ , lowerCamelCase_ = mask[0].size
lowerCamelCase_ , lowerCamelCase_ = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
lowerCamelCase_ = [np.array(m.convert('L' ).resize((w, h) , resample=PIL_INTERPOLATION['nearest'] ) )[None, :] for m in mask]
lowerCamelCase_ = np.concatenate(lowercase , axis=0 )
lowerCamelCase_ = mask.astype(np.floataa ) / 255.0
lowerCamelCase_ = 0
lowerCamelCase_ = 1
lowerCamelCase_ = torch.from_numpy(lowercase )
elif isinstance(mask[0] , torch.Tensor ):
lowerCamelCase_ = torch.cat(lowercase , dim=0 )
return mask
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = 42
UpperCamelCase = 42
def __init__( self : Tuple , A_ : Any , A_ : List[Any] ) -> Any:
"""simple docstring"""
super().__init__()
self.register_modules(unet=A_ , scheduler=A_ )
@torch.no_grad()
def __call__( self : Tuple , A_ : Union[torch.Tensor, PIL.Image.Image] , A_ : Union[torch.Tensor, PIL.Image.Image] , A_ : int = 250 , A_ : float = 0.0 , A_ : int = 10 , A_ : int = 10 , A_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , A_ : Optional[str] = "pil" , A_ : bool = True , ) -> Union[ImagePipelineOutput, Tuple]:
"""simple docstring"""
lowerCamelCase_ = image
lowerCamelCase_ = _preprocess_image(A_ )
lowerCamelCase_ = original_image.to(device=self.device , dtype=self.unet.dtype )
lowerCamelCase_ = _preprocess_mask(A_ )
lowerCamelCase_ = mask_image.to(device=self.device , dtype=self.unet.dtype )
lowerCamelCase_ = original_image.shape[0]
# sample gaussian noise to begin the loop
if isinstance(A_ , A_ ) and len(A_ ) != batch_size:
raise ValueError(
f"""You have passed a list of generators of length {len(A_ )}, but requested an effective batch"""
f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
lowerCamelCase_ = original_image.shape
lowerCamelCase_ = randn_tensor(A_ , generator=A_ , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(A_ , A_ , A_ , self.device )
lowerCamelCase_ = eta
lowerCamelCase_ = self.scheduler.timesteps[0] + 1
lowerCamelCase_ = generator[0] if isinstance(A_ , A_ ) else generator
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
if t < t_last:
# predict the noise residual
lowerCamelCase_ = self.unet(A_ , A_ ).sample
# compute previous image: x_t -> x_t-1
lowerCamelCase_ = self.scheduler.step(A_ , A_ , A_ , A_ , A_ , A_ ).prev_sample
else:
# compute the reverse: x_t-1 -> x_t
lowerCamelCase_ = self.scheduler.undo_step(A_ , A_ , A_ )
lowerCamelCase_ = t
lowerCamelCase_ = (image / 2 + 0.5).clamp(0 , 1 )
lowerCamelCase_ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowerCamelCase_ = self.numpy_to_pil(A_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=A_ )
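# Usage sketch for the pipeline above (commented out: it downloads a large
# checkpoint). Names follow the diffusers RePaint example and should be treated
# as assumptions rather than guarantees of this module:
#
#   from diffusers import RePaintPipeline, RePaintScheduler
#
#   scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256")
#   pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=scheduler)
#   result = pipe(image=original_image, mask_image=mask, num_inference_steps=250,
#                 jump_length=10, jump_n_sample=10)
#   result.images[0].save("inpainted.png")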
| 651
|
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=UpperCamelCase )
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = field(default='''language-modeling''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
UpperCamelCase = Features({'''text''': Value('''string''' )} )
UpperCamelCase = Features({} )
UpperCamelCase = "text"
@property
def a__ ( self : List[Any] ) -> Dict[str, str]:
"""simple docstring"""
return {self.text_column: "text"}
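# Illustrative sketch (commented out; "content" is a hypothetical column name,
# and LanguageModeling is the upstream name of the template class defined
# above): a task template maps a dataset's own column names onto canonical role
# names, and here only the text column is remapped:
#
#   template = LanguageModeling(text_column="content")
#   template.column_mapping  # -> {"content": "text"}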
| 651
| 1
|
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class A( UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = WavaVecaPhonemeCTCTokenizer
UpperCamelCase = False
def a__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
super().setUp()
lowerCamelCase_ = (
'<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː '
'ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː '
'ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 '
'oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ '
'pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ '
'yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ '
'əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ '
'ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ '
'ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ '
'uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ '
'ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ '
'ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ '
'ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4'
).split(' ' )
lowerCamelCase_ = dict(zip(A_ , range(len(A_ ) ) ) )
lowerCamelCase_ = {'pad_token': '<pad>', 'unk_token': '<unk>', 'bos_token': '<s>', 'eos_token': '</s>'}
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(A_ ) + '\n' )
def a__ ( self : Optional[int] , A_ : int , A_ : Union[str, Any]=False , A_ : List[Any]=20 , A_ : List[Any]=5 ) -> Tuple[str, list]:
"""simple docstring"""
lowerCamelCase_ = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=A_ )) for i in range(len(A_ ) )]
lowerCamelCase_ = list(filter(lambda A_ : [t[0]] == tokenizer.encode(t[1] , do_phonemize=A_ ) , A_ ) )
if max_length is not None and len(A_ ) > max_length:
lowerCamelCase_ = toks[:max_length]
if min_length is not None and len(A_ ) < min_length and len(A_ ) > 0:
while len(A_ ) < min_length:
lowerCamelCase_ = toks + toks
# toks_str = [t[1] for t in toks]
lowerCamelCase_ = [t[0] for t in toks]
# Ensure consistency
lowerCamelCase_ = tokenizer.decode(A_ , clean_up_tokenization_spaces=A_ )
if " " not in output_txt and len(A_ ) > 1:
lowerCamelCase_ = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=A_ )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=A_ )
)
if with_prefix_space:
lowerCamelCase_ = ' ' + output_txt
lowerCamelCase_ = tokenizer.encode(A_ , add_special_tokens=A_ )
return output_txt, output_ids
def a__ ( self : str , **A_ : str ) -> Any:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **A_ )
def a__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
# check adding a single token
tokenizer.add_tokens('xxx' )
lowerCamelCase_ = tokenizer('m xxx ɪ' , do_phonemize=A_ ).input_ids
self.assertEqual(A_ , [13, 392, 17] ) # xxx should be last token
tokenizer.add_tokens(['aaa', 'bbb', 'ccc'] )
lowerCamelCase_ = tokenizer('m aaa ɪ ccc' , do_phonemize=A_ ).input_ids
self.assertEqual(A_ , [13, 393, 17, 395] ) # aaa and ccc should be after xxx and 2 after aaa
lowerCamelCase_ = tokenizer('maɪ c' , do_phonemize=A_ ).input_ids
self.assertEqual(A_ , [3, 200] ) # mai should be <unk> (=3)
def a__ ( self : Any ) -> int:
"""simple docstring"""
lowerCamelCase_ = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
lowerCamelCase_ = 'Hello how are you'
lowerCamelCase_ = tokenizer.phonemize(A_ , phonemizer_lang='en-us' )
self.assertEqual(A_ , 'h ə l oʊ h aʊ ɑːɹ j uː' )
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
lowerCamelCase_ = 'Hello how are you'
lowerCamelCase_ = tokenizer.phonemize(A_ , phonemizer_lang='en-us' )
self.assertEqual(tokenizer(A_ ).input_ids , tokenizer(A_ , do_phonemize=A_ ).input_ids )
def a__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
lowerCamelCase_ = 'Hello how are you'
lowerCamelCase_ = tokenizer.phonemize(A_ , phonemizer_lang='en-us' )
lowerCamelCase_ = tokenizer.decode(tokenizer(A_ ).input_ids )
self.assertEqual(A_ , A_ )
def a__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
lowerCamelCase_ = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, 24, 22, 5, 77],
]
lowerCamelCase_ = tokenizer.decode(sample_ids[0] )
lowerCamelCase_ = tokenizer.batch_decode(A_ )
self.assertEqual(A_ , batch_tokens[0] )
self.assertEqual(A_ , ['k s ɾ ɾ l ɭʲ', 'j ð s j ð s oːɹ'] )
def a__ ( self : Tuple ) -> int:
"""simple docstring"""
lowerCamelCase_ = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
lowerCamelCase_ = 'Hello how are you'
lowerCamelCase_ = tokenizer.phonemize(A_ , phonemizer_lang='en-us' )
self.assertEqual(A_ , 'h ə l oʊ | h aʊ | ɑːɹ | j uː |' )
def a__ ( self : str ) -> str:
"""simple docstring"""
lowerCamelCase_ = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
lowerCamelCase_ = 'Hello how are you'
lowerCamelCase_ = tokenizer.phonemize(A_ , phonemizer_lang='en-us' )
self.assertEqual(tokenizer(A_ ).input_ids , tokenizer(A_ , do_phonemize=A_ ).input_ids )
def a__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
lowerCamelCase_ = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
# fmt: off
lowerCamelCase_ = [
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
lowerCamelCase_ = tokenizer.decode(sample_ids[0] )
lowerCamelCase_ = tokenizer.batch_decode(A_ )
self.assertEqual(A_ , batch_tokens[0] )
self.assertEqual(A_ , ['k s ɾ ɾ l ɭʲ', 'j ð s j ð s oːɹ'] )
# decode with no word_del_token filter
lowerCamelCase_ = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=A_ )
lowerCamelCase_ = tokenizer.batch_decode(A_ , filter_word_delimiter_token=A_ )
self.assertEqual(A_ , batch_tokens[0] )
self.assertEqual(A_ , ['k s ɾ | ɾ l | ɭʲ', '| j ð | s j ð s oːɹ'] )
def a__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
lowerCamelCase_ = 'Hello how are you'
lowerCamelCase_ = tokenizer.phonemize(A_ , phonemizer_lang='en-us' )
lowerCamelCase_ = tokenizer.decode(tokenizer(A_ ).input_ids , filter_word_delimiter_token=A_ )
self.assertEqual(A_ , A_ )
def a__ ( self : Tuple ) -> Any:
"""simple docstring"""
lowerCamelCase_ = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
lowerCamelCase_ = 'Hello how are you'
lowerCamelCase_ = tokenizer.phonemize(A_ , phonemizer_lang='en-us' )
lowerCamelCase_ = tokenizer.decode(tokenizer(A_ ).input_ids , filter_word_delimiter_token=A_ )
self.assertEqual(' '.join([p.strip() for p in phonemes.split(' |' )] ).strip() , A_ )
def a__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token=A_ )
lowerCamelCase_ = 'Hello how are you'
lowerCamelCase_ = tokenizer(A_ , phonemizer_lang='en-us' ).input_ids
lowerCamelCase_ = tokenizer(A_ , phonemizer_lang='fr-fr' ).input_ids
self.assertNotEqual(A_ , A_ )
lowerCamelCase_ = tokenizer.decode(A_ )
lowerCamelCase_ = tokenizer.decode(A_ )
self.assertEqual(A_ , 'h ə l oʊ h aʊ ɑːɹ j uː' )
self.assertEqual(A_ , 'ɛ l o h aʊ a ʁ j u' )
def a__ ( self : List[str] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
lowerCamelCase_ = 'Hello how Are you'
lowerCamelCase_ = 'hello how are you'
lowerCamelCase_ = tokenizer(A_ ).input_ids
lowerCamelCase_ = tokenizer(A_ ).input_ids
self.assertEqual(A_ , A_ )
def a__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
tokenizer.add_tokens(['!', '?'] )
tokenizer.add_special_tokens({'cls_token': '$$$'} )
# fmt: off
lowerCamelCase_ = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
]
# fmt: on
lowerCamelCase_ = tokenizer.batch_decode(A_ )
self.assertEqual(A_ , ['k s ɾ ɾ l ɭʲ!?!? $$$', 'j ð s j ð s oːɹ $$$'] )
@staticmethod
def a__ ( A_ : str , A_ : Optional[Any] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = [d[key] for d in offsets]
return retrieved_list
def a__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = self.get_tokenizer(word_delimiter_token='|' )
tokenizer.add_tokens('|' )
# fmt: off
# ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ
lowerCamelCase_ = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
# fmt: on
lowerCamelCase_ = tokenizer.decode(A_ , output_char_offsets=A_ , filter_word_delimiter_token=A_ )
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ) , 2 )
self.assertTrue('text' in outputs )
self.assertTrue('char_offsets' in outputs )
self.assertTrue(isinstance(A_ , A_ ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(' '.join(self.get_from_offsets(outputs['char_offsets'] , 'char' ) ) , outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs['char_offsets'] , 'char' ) , ['k', 's', 'ɾ', 'ɾ', '|', 'ɾ', 'l', '|', 'ɭʲ'] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs['char_offsets'] , 'start_offset' ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
self.assertListEqual(
self.get_from_offsets(outputs['char_offsets'] , 'end_offset' ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
def a__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ = self.get_tokenizer(word_delimiter_token='|' )
def check_list_tuples_equal(A_ : Optional[int] , A_ : int ):
self.assertTrue(isinstance(A_ , A_ ) )
self.assertTrue(isinstance(outputs_list[0] , A_ ) )
# transform list to ModelOutput
lowerCamelCase_ = WavaVecaPhonemeCTCTokenizerOutput(
{k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
self.assertListEqual(outputs_batch['text'] , outputs_batch_a['text'] )
def recursive_check(A_ : List[str] , A_ : Optional[int] ):
if isinstance(A_ , A_ ):
[recursive_check(la , lb ) for la, lb in zip(A_ , A_ )]
self.assertEqual(A_ , A_ )
if "char_offsets" in outputs_batch:
recursive_check(outputs_batch['char_offsets'] , outputs_batch_a['char_offsets'] )
# fmt: off
lowerCamelCase_ = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
lowerCamelCase_ = tokenizer.batch_decode(A_ , output_char_offsets=A_ )
lowerCamelCase_ = [tokenizer.decode(A_ , output_char_offsets=A_ ) for ids in sample_ids]
check_list_tuples_equal(A_ , A_ )
@unittest.skip('Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes' )
def a__ ( self : Tuple ) -> Any:
"""simple docstring"""
pass
@unittest.skip('Wav2Vec2PhonemeTokenizer always puts spaces between phonemes' )
def a__ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip('encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency' )
def a__ ( self : Any ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip('Wav2Vec2PhonemeModel has no max model length => no testing' )
def a__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
pass
def a__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = self.get_tokenizers(do_lower_case=A_ )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
lowerCamelCase_ = tokenizer.vocab_size
lowerCamelCase_ = len(A_ )
self.assertNotEqual(A_ , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
lowerCamelCase_ = ['aaaaa bbbbbb', 'cccccccccdddddddd']
lowerCamelCase_ = tokenizer.add_tokens(A_ )
lowerCamelCase_ = tokenizer.vocab_size
lowerCamelCase_ = len(A_ )
self.assertNotEqual(A_ , 0 )
self.assertEqual(A_ , A_ )
self.assertEqual(A_ , len(A_ ) )
self.assertEqual(A_ , all_size + len(A_ ) )
lowerCamelCase_ = tokenizer.encode('aaaaa bbbbbb low cccccccccdddddddd l' , add_special_tokens=A_ )
self.assertGreaterEqual(len(A_ ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
lowerCamelCase_ = {'eos_token': '>>>>|||<||<<|<<', 'pad_token': '<<<<<|||>|>>>>|>'}
lowerCamelCase_ = tokenizer.add_special_tokens(A_ )
lowerCamelCase_ = tokenizer.vocab_size
lowerCamelCase_ = len(A_ )
self.assertNotEqual(A_ , 0 )
self.assertEqual(A_ , A_ )
self.assertEqual(A_ , len(A_ ) )
self.assertEqual(A_ , all_size_a + len(A_ ) )
lowerCamelCase_ = tokenizer.encode(
'>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l' , add_special_tokens=A_ )
self.assertGreaterEqual(len(A_ ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
@unittest.skip('The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.' )
def a__ ( self : List[str] ) -> Any:
"""simple docstring"""
pass
@unittest.skip('The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.' )
def a__ ( self : str ) -> Any:
"""simple docstring"""
pass
def a__ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = self.get_tokenizers(fast=A_ , do_lower_case=A_ )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
lowerCamelCase_ = ['ð', 'ɪ', 's', 'ɪ', 'z', 'ɐ', 't', 'ɛ', 'k', 's', 't']
lowerCamelCase_ = tokenizer.convert_tokens_to_string(A_ )
self.assertIsInstance(output['text'] , A_ )
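# The phonemize/decode round-trip these tests exercise, as a standalone sketch
# (requires the `phonemizer` backend; the expected string is taken from the
# assertions above):
if __name__ == "__main__":
    from transformers import Wav2Vec2PhonemeCTCTokenizer

    tok = Wav2Vec2PhonemeCTCTokenizer.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
    ids = tok('Hello how are you' ).input_ids
    print(tok.decode(ids ) )  # h ə l oʊ h aʊ ɑːɹ j uː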
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = '''new-model'''
if is_tf_available():
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = NewModelConfig
@require_tf
class A( unittest.TestCase ):
'''simple docstring'''
@slow
def a__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = 'bert-base-cased'
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = 'bert-base-cased'
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForPreTraining.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForCausalLM.from_pretrained(A_ )
lowerCamelCase_ , lowerCamelCase_ = TFAutoModelForCausalLM.from_pretrained(A_ , output_loading_info=A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelWithLMHead.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : int ) -> str:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForMaskedLM.from_pretrained(A_ )
lowerCamelCase_ , lowerCamelCase_ = TFAutoModelForMaskedLM.from_pretrained(A_ , output_loading_info=A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : Any ) -> List[Any]:
"""simple docstring"""
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained(A_ )
lowerCamelCase_ , lowerCamelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained(A_ , output_loading_info=A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForSequenceClassification.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : List[Any] ) -> Any:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForQuestionAnswering.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
@require_tensorflow_probability
def a__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForTableQuestionAnswering.from_pretrained(A_ )
lowerCamelCase_ , lowerCamelCase_ = TFAutoModelForTableQuestionAnswering.from_pretrained(
A_ , output_loading_info=A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
def a__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = TFAutoModelWithLMHead.from_pretrained(A_ )
self.assertIsInstance(A_ , A_ )
self.assertEqual(model.num_parameters() , 14410 )
self.assertEqual(model.num_parameters(only_trainable=A_ ) , 14410 )
def a__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = TFAutoModelWithLMHead.from_pretrained(A_ )
self.assertIsInstance(A_ , A_ )
self.assertEqual(model.num_parameters() , 14410 )
self.assertEqual(model.num_parameters(only_trainable=A_ ) , 14410 )
def a__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ = TFAutoModel.from_pretrained('sgugger/funnel-random-tiny' )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = copy.deepcopy(model.config )
lowerCamelCase_ = ['FunnelBaseModel']
lowerCamelCase_ = TFAutoModel.from_config(A_ )
self.assertIsInstance(A_ , A_ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(A_ )
lowerCamelCase_ = TFAutoModel.from_pretrained(A_ )
self.assertIsInstance(A_ , A_ )
def a__ ( self : Any ) -> Tuple:
"""simple docstring"""
try:
AutoConfig.register('new-model' , A_ )
lowerCamelCase_ = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(A_ ):
auto_class.register(A_ , A_ )
auto_class.register(A_ , A_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(A_ ):
auto_class.register(A_ , A_ )
# Now that the config is registered, it can be used as any other config with the auto-API
lowerCamelCase_ = BertModelTester(self ).get_config()
lowerCamelCase_ = NewModelConfig(**tiny_config.to_dict() )
lowerCamelCase_ = auto_class.from_config(A_ )
self.assertIsInstance(A_ , A_ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(A_ )
lowerCamelCase_ = auto_class.from_pretrained(A_ )
self.assertIsInstance(A_ , A_ )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def a__ ( self : int ) -> int:
"""simple docstring"""
with self.assertRaisesRegex(
A_ , 'bert-base is not a local folder and is not a valid model identifier' ):
lowerCamelCase_ = TFAutoModel.from_pretrained('bert-base' )
def a__ ( self : Any ) -> Dict:
"""simple docstring"""
with self.assertRaisesRegex(
A_ , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
lowerCamelCase_ = TFAutoModel.from_pretrained(A_ , revision='aaaaaa' )
def a__ ( self : str ) -> int:
"""simple docstring"""
with self.assertRaisesRegex(
A_ , 'hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin' , ):
lowerCamelCase_ = TFAutoModel.from_pretrained('hf-internal-testing/config-no-model' )
def a__ ( self : Any ) -> List[Any]:
"""simple docstring"""
with self.assertRaisesRegex(A_ , 'Use `from_pt=True` to load this model' ):
lowerCamelCase_ = TFAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only' )
def a__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' )
with RequestCounter() as counter:
lowerCamelCase_ = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
lowerCamelCase_ = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' )
with RequestCounter() as counter:
lowerCamelCase_ = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
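# The registration flow the try/finally block above exercises, condensed (names as in
# the test; `TFNewModel` stands in for the hypothetical model class being registered):
#
#     AutoConfig.register('new-model' , NewModelConfig )
#     TFAutoModel.register(NewModelConfig , TFNewModel )
#     model = TFAutoModel.from_config(NewModelConfig() )
#     # afterwards the test removes the entries from CONFIG_MAPPING._extra_content and
#     # each TF_MODEL_*_MAPPING._extra_content so later tests start from a clean state.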
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
lowerCamelCase : Dict = get_tests_dir("fixtures/dummy-config.json")
class A( unittest.TestCase ):
'''simple docstring'''
def a__ ( self : Dict ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = 0
def a__ ( self : Any ) -> int:
"""simple docstring"""
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec('transformers.models.auto' ) )
def a__ ( self : Any ) -> str:
"""simple docstring"""
lowerCamelCase_ = AutoConfig.from_pretrained('bert-base-uncased' )
self.assertIsInstance(A_ , A_ )
def a__ ( self : str ) -> str:
"""simple docstring"""
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsInstance(A_ , A_ )
def a__ ( self : List[Any] ) -> int:
"""simple docstring"""
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsInstance(A_ , A_ )
def a__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = AutoConfig.for_model('roberta' )
self.assertIsInstance(A_ , A_ )
def a__ ( self : Dict ) -> List[str]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
lowerCamelCase_ = os.path.join(A_ , 'fake-roberta' )
os.makedirs(A_ , exist_ok=A_ )
with open(os.path.join(A_ , 'config.json' ) , 'w' ) as f:
f.write(json.dumps({} ) )
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertEqual(type(A_ ) , A_ )
def a__ ( self : Any ) -> Optional[int]:
"""simple docstring"""
try:
AutoConfig.register('custom' , A_ )
# Wrong model type will raise an error
with self.assertRaises(A_ ):
AutoConfig.register('model' , A_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(A_ ):
AutoConfig.register('bert' , A_ )
# Now that the config is registered, it can be used as any other config with the auto-API
lowerCamelCase_ = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(A_ )
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsInstance(A_ , A_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def a__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
with self.assertRaisesRegex(
A_ , 'bert-base is not a local folder and is not a valid model identifier' ):
lowerCamelCase_ = AutoConfig.from_pretrained('bert-base' )
def a__ ( self : Any ) -> List[Any]:
"""simple docstring"""
with self.assertRaisesRegex(
A_ , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
lowerCamelCase_ = AutoConfig.from_pretrained(A_ , revision='aaaaaa' )
def a__ ( self : int ) -> List[str]:
"""simple docstring"""
with self.assertRaisesRegex(
A_ , 'hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.' , ):
lowerCamelCase_ = AutoConfig.from_pretrained('hf-internal-testing/no-config-test-repo' )
def a__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
with self.assertRaises(A_ ):
lowerCamelCase_ = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(A_ ):
lowerCamelCase_ = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' , trust_remote_code=A_ )
lowerCamelCase_ = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' , trust_remote_code=A_ )
self.assertEqual(config.__class__.__name__ , 'NewModelConfig' )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(A_ )
lowerCamelCase_ = AutoConfig.from_pretrained(A_ , trust_remote_code=A_ )
self.assertEqual(reloaded_config.__class__.__name__ , 'NewModelConfig' )
def a__ ( self : List[str] ) -> str:
"""simple docstring"""
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = '''new-model'''
try:
AutoConfig.register('new-model' , A_ )
# If remote code is not set, the default is to use local
lowerCamelCase_ = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' )
self.assertEqual(config.__class__.__name__ , 'NewModelConfigLocal' )
# If remote code is disabled, we load the local one.
lowerCamelCase_ = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' , trust_remote_code=A_ )
self.assertEqual(config.__class__.__name__ , 'NewModelConfigLocal' )
# If remote is enabled, we load from the Hub
lowerCamelCase_ = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' , trust_remote_code=A_ )
self.assertEqual(config.__class__.__name__ , 'NewModelConfig' )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : str = logging.get_logger(__name__)
lowerCamelCase : List[str] = {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = '''gpt_neox_japanese'''
def __init__( self : int , A_ : Dict=32000 , A_ : List[Any]=2560 , A_ : Dict=32 , A_ : Union[str, Any]=32 , A_ : List[Any]=4 , A_ : List[str]="gelu" , A_ : Dict=1.00 , A_ : int=10000 , A_ : Dict=2048 , A_ : Dict=0.02 , A_ : Any=1E-5 , A_ : Union[str, Any]=True , A_ : int=31996 , A_ : List[str]=31999 , A_ : List[Any]=0.1 , A_ : List[Any]=0.0 , **A_ : Tuple , ) -> Dict:
"""simple docstring"""
super().__init__(bos_token_id=A_ , eos_token_id=A_ , **A_ )
lowerCamelCase_ = vocab_size
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_multiple_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = rotary_pct
lowerCamelCase_ = rotary_emb_base
lowerCamelCase_ = initializer_range
lowerCamelCase_ = layer_norm_eps
lowerCamelCase_ = use_cache
lowerCamelCase_ = attention_dropout
lowerCamelCase_ = hidden_dropout
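# A hedged instantiation sketch: the public class behind the config above is
# transformers' GPTNeoXJapaneseConfig (model_type 'gpt_neox_japanese'), so the
# defaults from __init__ can be checked like this:
if __name__ == "__main__":
    from transformers import GPTNeoXJapaneseConfig

    cfg = GPTNeoXJapaneseConfig()
    print(cfg.hidden_size , cfg.rotary_emb_base )  # 2560 10000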
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
lowerCamelCase : Tuple = "0.12" # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor ( shape : Optional[Any] , vocab_size : List[str] , rng : List[Any]=None ):
    '''simple docstring'''
    if rng is None:
        rng = random.Random()
    total_dims = 1
    for dim in shape:
        total_dims *= dim
    values = []
    for _ in range(total_dims ):
        values.append(rng.randint(0 , vocab_size - 1 ) )
    output = np.array(values , dtype=jnp.int32 ).reshape(shape )
    return output
def random_attention_mask ( shape : Optional[int] , rng : Tuple=None ):
    '''simple docstring'''
    attn_mask = ids_tensor(shape , vocab_size=2 , rng=rng )
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class A:
'''simple docstring'''
UpperCamelCase = None
UpperCamelCase = ()
def a__ ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
lowerCamelCase_ = 2
lowerCamelCase_ = inputs['input_ids'].shape[-1] // 2
lowerCamelCase_ = inputs['input_ids'][:max_batch_size, :sequence_length]
lowerCamelCase_ = jnp.ones_like(A_ )
lowerCamelCase_ = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
lowerCamelCase_ = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
lowerCamelCase_ = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def a__ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = self._get_input_ids_and_config()
lowerCamelCase_ = False
lowerCamelCase_ = max_length
lowerCamelCase_ = 0
for model_class in self.all_generative_model_classes:
lowerCamelCase_ = model_class(A_ )
lowerCamelCase_ = model_class.__name__[4:] # Skip the "Flax" at the beginning
lowerCamelCase_ = getattr(A_ , A_ )
lowerCamelCase_ = pt_model_class(A_ ).eval()
lowerCamelCase_ = load_flax_weights_in_pytorch_model(A_ , flax_model.params )
lowerCamelCase_ = flax_model.generate(A_ ).sequences
lowerCamelCase_ = pt_model.generate(torch.tensor(A_ , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
lowerCamelCase_ = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def a__ ( self : List[Any] ) -> int:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = self._get_input_ids_and_config()
lowerCamelCase_ = False
lowerCamelCase_ = max_length
for model_class in self.all_generative_model_classes:
lowerCamelCase_ = model_class(A_ )
lowerCamelCase_ = model.generate(A_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , A_ )
lowerCamelCase_ = jit(model.generate )
lowerCamelCase_ = jit_generate(A_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a__ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = self._get_input_ids_and_config()
lowerCamelCase_ = True
lowerCamelCase_ = max_length
for model_class in self.all_generative_model_classes:
lowerCamelCase_ = model_class(A_ )
lowerCamelCase_ = model.generate(A_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , A_ )
lowerCamelCase_ = jit(model.generate )
lowerCamelCase_ = jit_generate(A_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a__ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = self._get_input_ids_and_config()
lowerCamelCase_ = False
lowerCamelCase_ = max_length
lowerCamelCase_ = 2
for model_class in self.all_generative_model_classes:
lowerCamelCase_ = model_class(A_ )
lowerCamelCase_ = model.generate(A_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , A_ )
lowerCamelCase_ = jit(model.generate )
lowerCamelCase_ = jit_generate(A_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a__ ( self : int ) -> int:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = self._get_input_ids_and_config()
lowerCamelCase_ = False
lowerCamelCase_ = max_length
lowerCamelCase_ = 2
lowerCamelCase_ = 2
for model_class in self.all_generative_model_classes:
lowerCamelCase_ = model_class(A_ )
lowerCamelCase_ = model.generate(A_ ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def a__ ( self : str ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = self._get_input_ids_and_config()
lowerCamelCase_ = True
lowerCamelCase_ = max_length
lowerCamelCase_ = 0.8
lowerCamelCase_ = 10
lowerCamelCase_ = 0.3
lowerCamelCase_ = 1
lowerCamelCase_ = 8
lowerCamelCase_ = 9
for model_class in self.all_generative_model_classes:
lowerCamelCase_ = model_class(A_ )
lowerCamelCase_ = model.generate(A_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , A_ )
lowerCamelCase_ = jit(model.generate )
lowerCamelCase_ = jit_generate(A_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a__ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = self._get_input_ids_and_config()
lowerCamelCase_ = max_length
lowerCamelCase_ = 1
lowerCamelCase_ = 8
lowerCamelCase_ = 9
for model_class in self.all_generative_model_classes:
lowerCamelCase_ = model_class(A_ )
lowerCamelCase_ = model.generate(A_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , A_ )
lowerCamelCase_ = jit(model.generate )
lowerCamelCase_ = jit_generate(A_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a__ ( self : Dict ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = self._get_input_ids_and_config()
lowerCamelCase_ = max_length
lowerCamelCase_ = 2
lowerCamelCase_ = 1
lowerCamelCase_ = 8
lowerCamelCase_ = 9
for model_class in self.all_generative_model_classes:
lowerCamelCase_ = model_class(A_ )
lowerCamelCase_ = model.generate(A_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , A_ )
lowerCamelCase_ = jit(model.generate )
lowerCamelCase_ = jit_generate(A_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a__ ( self : Any ) -> str:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = self._get_input_ids_and_config()
# pad attention mask on the left
lowerCamelCase_ = attention_mask.at[(0, 0)].set(0 )
lowerCamelCase_ = False
lowerCamelCase_ = max_length
for model_class in self.all_generative_model_classes:
lowerCamelCase_ = model_class(A_ )
lowerCamelCase_ = model.generate(A_ , attention_mask=A_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , A_ )
lowerCamelCase_ = jit(model.generate )
lowerCamelCase_ = jit_generate(A_ , attention_mask=A_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = self._get_input_ids_and_config()
# pad attention mask on the left
lowerCamelCase_ = attention_mask.at[(0, 0)].set(0 )
lowerCamelCase_ = True
lowerCamelCase_ = max_length
for model_class in self.all_generative_model_classes:
lowerCamelCase_ = model_class(A_ )
lowerCamelCase_ = model.generate(A_ , attention_mask=A_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , A_ )
lowerCamelCase_ = jit(model.generate )
lowerCamelCase_ = jit_generate(A_ , attention_mask=A_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a__ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = self._get_input_ids_and_config()
# pad attention mask on the left
lowerCamelCase_ = attention_mask.at[(0, 0)].set(0 )
lowerCamelCase_ = 2
lowerCamelCase_ = max_length
for model_class in self.all_generative_model_classes:
lowerCamelCase_ = model_class(A_ )
lowerCamelCase_ = model.generate(A_ , attention_mask=A_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , A_ )
lowerCamelCase_ = jit(model.generate )
lowerCamelCase_ = jit_generate(A_ , attention_mask=A_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class A( unittest.TestCase ):
'''simple docstring'''
def a__ ( self : int ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-bert' )
lowerCamelCase_ = FlaxAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-bert-flax-only' )
lowerCamelCase_ = 'Hello world'
lowerCamelCase_ = tokenizer(A_ , return_tensors='np' ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(A_ , 'do_samples' ):
model.generate(A_ , do_samples=A_ )
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(A_ , 'foo' ):
lowerCamelCase_ = {'foo': 'bar'}
model.generate(A_ , **A_ )
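# The core pattern the mixin above checks, isolated: jit-compiling `generate` must
# reproduce eager generation. A sketch using the same tiny checkpoints as the test
# class directly above:
if __name__ == "__main__":
    from jax import jit
    from transformers import AutoTokenizer, FlaxAutoModelForCausalLM

    tok = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-bert' )
    model = FlaxAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-bert-flax-only' )
    inputs = tok('Hello world' , return_tensors='np' ).input_ids
    eager = model.generate(inputs ).sequences
    jitted = jit(model.generate )(inputs ).sequences
    assert eager.tolist() == jitted.tolist()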
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
lowerCamelCase : List[Any] = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
"text-classification",
"language-modeling",
"summarization",
"token-classification",
"question-answering",
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
lowerCamelCase : Tuple = logging.getLogger()
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = argparse.ArgumentParser()
parser.add_argument('-f' )
lowerCamelCase_ = parser.parse_args()
return args.f
def _SCREAMING_SNAKE_CASE ( lowercase : Optional[Any] , lowercase : Dict="eval" ):
'''simple docstring'''
lowerCamelCase_ = os.path.join(lowercase , f"""{split}_results.json""" )
if os.path.exists(lowercase ):
with open(lowercase , 'r' ) as f:
return json.load(lowercase )
raise ValueError(f"""can't find {path}""" )
lowerCamelCase : str = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class A( UpperCamelCase ):
'''simple docstring'''
def a__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = self.get_auto_remove_tmp_dir()
lowerCamelCase_ = f"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
with patch.object(A_ , 'argv' , A_ ):
run_flax_glue.main()
lowerCamelCase_ = get_results(A_ )
self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )
@slow
def a__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = self.get_auto_remove_tmp_dir()
lowerCamelCase_ = f"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(A_ , 'argv' , A_ ):
run_clm_flax.main()
lowerCamelCase_ = get_results(A_ )
self.assertLess(result['eval_perplexity'] , 100 )
@slow
def a__ ( self : str ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = self.get_auto_remove_tmp_dir()
lowerCamelCase_ = f"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
with patch.object(A_ , 'argv' , A_ ):
run_summarization_flax.main()
lowerCamelCase_ = get_results(A_ , split='test' )
self.assertGreaterEqual(result['test_rouge1'] , 10 )
self.assertGreaterEqual(result['test_rouge2'] , 2 )
self.assertGreaterEqual(result['test_rougeL'] , 7 )
self.assertGreaterEqual(result['test_rougeLsum'] , 7 )
@slow
def a__ ( self : Optional[int] ) -> str:
"""simple docstring"""
lowerCamelCase_ = self.get_auto_remove_tmp_dir()
lowerCamelCase_ = f"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
with patch.object(A_ , 'argv' , A_ ):
run_mlm_flax.main()
lowerCamelCase_ = get_results(A_ )
self.assertLess(result['eval_perplexity'] , 42 )
@slow
def a__ ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = self.get_auto_remove_tmp_dir()
lowerCamelCase_ = f"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(A_ , 'argv' , A_ ):
run_ta_mlm_flax.main()
lowerCamelCase_ = get_results(A_ )
self.assertGreaterEqual(result['eval_accuracy'] , 0.42 )
@slow
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = 7 if get_gpu_count() > 1 else 2
lowerCamelCase_ = self.get_auto_remove_tmp_dir()
lowerCamelCase_ = f"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
with patch.object(A_ , 'argv' , A_ ):
run_flax_ner.main()
lowerCamelCase_ = get_results(A_ )
self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )
self.assertGreaterEqual(result['eval_f1'] , 0.3 )
@slow
def a__ ( self : str ) -> int:
"""simple docstring"""
lowerCamelCase_ = self.get_auto_remove_tmp_dir()
lowerCamelCase_ = f"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
with patch.object(A_ , 'argv' , A_ ):
run_qa.main()
lowerCamelCase_ = get_results(A_ )
self.assertGreaterEqual(result['eval_f1'] , 30 )
self.assertGreaterEqual(result['eval_exact'] , 30 )
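# The invocation pattern every test above shares, in isolation: build a CLI-style
# argv list, patch sys.argv, and run the example script's main() in-process
# (tmp_dir and the data files below are placeholders):
#
#     testargs = ['run_flax_glue.py', '--model_name_or_path', 'distilbert-base-uncased',
#                 '--output_dir', tmp_dir, '--train_file', train_file]
#     with patch.object(sys , 'argv' , testargs ):
#         run_flax_glue.main()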
def _SCREAMING_SNAKE_CASE ( density : float , bulk_modulus : float ):
'''simple docstring'''
if density <= 0:
raise ValueError('Impossible fluid density' )
if bulk_modulus <= 0:
raise ValueError('Impossible bulk modulus' )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
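# The function above is the Newton-Laplace relation c = sqrt(K / rho). A quick check
# with rough values for water (rho ~ 998 kg/m^3, K ~ 2.15e9 Pa):
if __name__ == "__main__":
    print(round(_SCREAMING_SNAKE_CASE(998 , 2.15E9 ) ) )  # ~1468 m/s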
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    '''simple docstring'''
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None
CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")
def _SCREAMING_SNAKE_CASE ( root : TreeNode | None ):
    '''simple docstring'''
    if root is None:
        return 0
    # Validation
    def count_nodes(node : TreeNode | None ) -> int:
        if node is None:
            return 0
        return count_nodes(node.left ) + count_nodes(node.right ) + 1
    def count_coins(node : TreeNode | None ) -> int:
        if node is None:
            return 0
        return count_coins(node.left ) + count_coins(node.right ) + node.data
    if count_nodes(root ) != count_coins(root ):
        raise ValueError('The number of nodes must equal the number of coins' )
    # Main calculation
    def get_distrib(node : TreeNode | None ) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0 , 1 )
        left_distrib_moves , left_distrib_excess = get_distrib(node.left )
        right_distrib_moves , right_distrib_excess = get_distrib(node.right )
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left )
            + abs(coins_to_right )
        )
        result_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(result_moves , result_excess )
    return get_distrib(root )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
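# A small usage check for the solver above: a three-node tree whose root holds all
# three coins needs exactly two moves (one coin pushed down to each leaf).
if __name__ == "__main__":
    example_root = TreeNode(3 , TreeNode(0 ) , TreeNode(0 ) )
    print(_SCREAMING_SNAKE_CASE(example_root ) )  # 2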
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase : List[str] = logging.get_logger(__name__)
lowerCamelCase : Any = {
"facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = '''levit'''
def __init__( self : int , A_ : Optional[int]=224 , A_ : Union[str, Any]=3 , A_ : Dict=3 , A_ : Optional[Any]=2 , A_ : List[Any]=1 , A_ : Optional[Any]=16 , A_ : str=[128, 256, 384] , A_ : str=[4, 8, 12] , A_ : Optional[Any]=[4, 4, 4] , A_ : int=[16, 16, 16] , A_ : Union[str, Any]=0 , A_ : int=[2, 2, 2] , A_ : int=[2, 2, 2] , A_ : Optional[Any]=0.02 , **A_ : Optional[int] , ) -> int:
"""simple docstring"""
super().__init__(**A_ )
lowerCamelCase_ = image_size
lowerCamelCase_ = num_channels
lowerCamelCase_ = kernel_size
lowerCamelCase_ = stride
lowerCamelCase_ = padding
lowerCamelCase_ = hidden_sizes
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = depths
lowerCamelCase_ = key_dim
lowerCamelCase_ = drop_path_rate
lowerCamelCase_ = patch_size
lowerCamelCase_ = attention_ratio
lowerCamelCase_ = mlp_ratio
lowerCamelCase_ = initializer_range
lowerCamelCase_ = [
['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = version.parse('''1.11''' )
@property
def a__ ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def a__ ( self : str ) -> float:
"""simple docstring"""
return 1E-4
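# A hedged export sketch for the ONNX config above (its inputs and atol are the two
# properties just defined). `LevitOnnxConfig` is the assumed public name of the class;
# the export helper is transformers' generic one:
#
#     from pathlib import Path
#     from transformers import AutoImageProcessor, LevitModel
#     from transformers.onnx import export
#
#     model = LevitModel.from_pretrained('facebook/levit-128S' )
#     processor = AutoImageProcessor.from_pretrained('facebook/levit-128S' )
#     onnx_config = LevitOnnxConfig(model.config )
#     export(processor , model , onnx_config , onnx_config.default_onnx_opset , Path('levit.onnx' ) )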
from manim import *
class A( UpperCamelCase ):
'''simple docstring'''
def a__ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = Rectangle(height=0.5 , width=0.5 )
lowerCamelCase_ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
lowerCamelCase_ = Rectangle(height=0.25 , width=0.25 )
lowerCamelCase_ = [mem.copy() for i in range(6 )]
lowerCamelCase_ = [mem.copy() for i in range(6 )]
lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = VGroup(A_ , A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = Text('CPU' , font_size=24 )
lowerCamelCase_ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(A_ )
lowerCamelCase_ = [mem.copy() for i in range(4 )]
lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = Text('GPU' , font_size=24 )
lowerCamelCase_ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
gpu.move_to([-1, -1, 0] )
self.add(A_ )
lowerCamelCase_ = [mem.copy() for i in range(6 )]
lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = Text('Model' , font_size=24 )
lowerCamelCase_ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
model.move_to([3, -1.0, 0] )
self.add(A_ )
lowerCamelCase_ = []
lowerCamelCase_ = []
for i, rect in enumerate(A_ ):
lowerCamelCase_ = fill.copy().set_fill(A_ , opacity=0.8 )
target.move_to(A_ )
model_arr.append(A_ )
lowerCamelCase_ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(A_ , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(A_ )
self.add(*A_ , *A_ )
lowerCamelCase_ = [meta_mem.copy() for i in range(6 )]
lowerCamelCase_ = [meta_mem.copy() for i in range(6 )]
lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = VGroup(A_ , A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = Text('Disk' , font_size=24 )
lowerCamelCase_ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
disk.move_to([-4, -1.25, 0] )
self.add(A_ , A_ )
lowerCamelCase_ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCamelCase_ = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(A_ , A_ )
lowerCamelCase_ = MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(A_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(A_ )
lowerCamelCase_ = MarkupText(
f"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(A_ ) )
lowerCamelCase_ = Square(0.3 )
input.set_fill(A_ , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , A_ , buff=0.5 )
self.play(Write(A_ ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=A_ , buff=0.02 )
self.play(MoveToTarget(A_ ) )
self.play(FadeOut(A_ ) )
lowerCamelCase_ = Arrow(start=A_ , end=A_ , color=A_ , buff=0.5 )
a.next_to(model_arr[0].get_left() , A_ , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
lowerCamelCase_ = MarkupText(
f"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(A_ , run_time=3 ) )
lowerCamelCase_ = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(A_ ) , Circumscribe(model_arr[0] , color=A_ , **A_ ) , Circumscribe(model_cpu_arr[0] , color=A_ , **A_ ) , Circumscribe(gpu_rect[0] , color=A_ , **A_ ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
lowerCamelCase_ = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , A_ , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
lowerCamelCase_ = AnimationGroup(
FadeOut(A_ , run_time=0.5 ) , MoveToTarget(A_ , run_time=0.5 ) , FadeIn(A_ , run_time=0.5 ) , lag_ratio=0.2 )
self.play(A_ )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
lowerCamelCase_ = 0.7
self.play(
Circumscribe(model_arr[i] , **A_ ) , Circumscribe(cpu_left_col_base[i] , **A_ ) , Circumscribe(cpu_left_col_base[i + 1] , color=A_ , **A_ ) , Circumscribe(gpu_rect[0] , color=A_ , **A_ ) , Circumscribe(model_arr[i + 1] , color=A_ , **A_ ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=A_ , **A_ ) , Circumscribe(cpu_left_col_base[-1] , color=A_ , **A_ ) , Circumscribe(gpu_rect[0] , color=A_ , **A_ ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
lowerCamelCase_ = a_c
lowerCamelCase_ = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(A_ ) , FadeOut(A_ , run_time=0.5 ) , )
lowerCamelCase_ = MarkupText(f"""Inference on a model too large for GPU memory\nis successfully completed.""" , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(A_ , run_time=3 ) , MoveToTarget(A_ ) )
self.wait()
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class A( unittest.TestCase ):
'''simple docstring'''
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
debug_launcher(test_script.main )
def a__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
debug_launcher(test_ops.main )
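# A minimal, hedged sketch of what `debug_launcher` does for the tests above:
# it spawns the target function across CPU processes so distributed code paths
# can be debugged without GPUs. The `num_processes` keyword and the
# `Accelerator` introspection below are illustrative assumptions, not part of
# the test file.
from accelerate import Accelerator, debug_launcher

def _worker():
    accelerator = Accelerator(cpu=True)
    print(f"hello from process {accelerator.process_index} of {accelerator.num_processes}")

if __name__ == "__main__":
    debug_launcher(_worker, num_processes=2)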
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def _SCREAMING_SNAKE_CASE ( lowercase : Optional[Any] ):
'''simple docstring'''
return EnvironmentCommand()
class A( UpperCamelCase ):
'''simple docstring'''
@staticmethod
def a__ ( A_ : ArgumentParser ) -> str:
"""simple docstring"""
lowerCamelCase_ = parser.add_parser('env' )
download_parser.set_defaults(func=A_ )
def a__ ( self : Optional[Any] ) -> Any:
"""simple docstring"""
lowerCamelCase_ = huggingface_hub.__version__
lowerCamelCase_ = 'not installed'
lowerCamelCase_ = 'NA'
if is_torch_available():
import torch
lowerCamelCase_ = torch.__version__
lowerCamelCase_ = torch.cuda.is_available()
lowerCamelCase_ = 'not installed'
if is_transformers_available():
import transformers
lowerCamelCase_ = transformers.__version__
lowerCamelCase_ = 'not installed'
if is_accelerate_available():
import accelerate
lowerCamelCase_ = accelerate.__version__
lowerCamelCase_ = 'not installed'
if is_xformers_available():
import xformers
lowerCamelCase_ = xformers.__version__
lowerCamelCase_ = {
'`diffusers` version': version,
'Platform': platform.platform(),
'Python version': platform.python_version(),
'PyTorch version (GPU?)': f"""{pt_version} ({pt_cuda_available})""",
'Huggingface_hub version': hub_version,
'Transformers version': transformers_version,
'Accelerate version': accelerate_version,
'xFormers version': xformers_version,
'Using GPU in script?': '<fill in>',
'Using distributed or parallel set-up in script?': '<fill in>',
}
print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n' )
print(self.format_dict(A_ ) )
return info
@staticmethod
def a__ ( A_ : Dict ) -> Any:
"""simple docstring"""
return "\n".join([f"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
from __future__ import annotations
def _SCREAMING_SNAKE_CASE ( lowercase : list[float] ):
'''simple docstring'''
if len(lowercase ) < 2:
raise ValueError('Monogons and Digons are not polygons in the Euclidean space' )
if any(i <= 0 for i in nums ):
raise ValueError('All values must be greater than 0' )
lowerCamelCase_ = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
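# A self-contained sketch of the polygon-inequality check above (names here are
# illustrative, since the snippet's identifiers are masked): a set of side
# lengths forms a polygon only if the longest side is strictly shorter than the
# sum of the others.
def can_form_polygon(sides: list) -> bool:
    ordered = sorted(sides)
    return ordered[-1] < sum(ordered[:-1])

assert can_form_polygon([3, 4, 5])      # a valid triangle
assert not can_form_polygon([1, 1, 3])  # 3 >= 1 + 1, so the sides cannot close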
from __future__ import annotations
from fractions import Fraction
def _SCREAMING_SNAKE_CASE ( lowercase : int , lowercase : int ):
'''simple docstring'''
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def _SCREAMING_SNAKE_CASE ( lowercase : int ):
'''simple docstring'''
lowerCamelCase_ = []
lowerCamelCase_ = 11
lowerCamelCase_ = int('1' + '0' * digit_len )
for num in range(lowercase , lowercase ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(lowercase , lowercase ):
solutions.append(f"""{num}/{den}""" )
den += 1
num += 1
lowerCamelCase_ = 10
return solutions
def _SCREAMING_SNAKE_CASE ( lowercase : int = 2 ):
'''simple docstring'''
lowerCamelCase_ = 1.0
for fraction in fraction_list(lowercase ):
lowerCamelCase_ = Fraction(lowercase )
result *= frac.denominator / frac.numerator
return int(lowercase )
if __name__ == "__main__":
print(solution())
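# Worked example (a sketch alongside the solution above): 49/98 is the classic
# digit-cancelling fraction, since naively striking the shared 9 gives 4/8,
# which happens to equal 49/98. The four non-trivial two-digit cases multiply
# to 1/100, so `solution()` is expected to return 100.
from fractions import Fraction

assert Fraction(49, 98) == Fraction(4, 8)
product = Fraction(16, 64) * Fraction(19, 95) * Fraction(26, 65) * Fraction(49, 98)
assert product == Fraction(1, 100)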
def _SCREAMING_SNAKE_CASE ( lowercase : int ):
'''simple docstring'''
if number < 0:
raise ValueError('number must not be negative' )
return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
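# A short sketch of the bit trick used above: a positive power of two has
# exactly one set bit, so clearing its lowest set bit with n & (n - 1) yields
# zero. Note the function above also returns True for 0, since 0 & -1 == 0.
for n, expected in [(1, True), (2, True), (3, False), (16, True), (18, False)]:
    assert (n & (n - 1) == 0) is expected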
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowerCamelCase : List[Any] = logging.get_logger(__name__)
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = ['''pixel_values''']
def __init__( self : List[Any] , A_ : bool = True , A_ : Dict[str, int] = None , A_ : PILImageResampling = PILImageResampling.BICUBIC , A_ : bool = True , A_ : Dict[str, int] = None , A_ : bool = True , A_ : Union[int, float] = 1 / 255 , A_ : bool = True , A_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , A_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **A_ : Tuple , ) -> None:
"""simple docstring"""
super().__init__(**A_ )
lowerCamelCase_ = size if size is not None else {'shortest_edge': 224}
lowerCamelCase_ = get_size_dict(A_ , default_to_square=A_ )
lowerCamelCase_ = crop_size if crop_size is not None else {'height': 224, 'width': 224}
lowerCamelCase_ = get_size_dict(A_ , param_name='crop_size' )
lowerCamelCase_ = do_resize
lowerCamelCase_ = size
lowerCamelCase_ = resample
lowerCamelCase_ = do_center_crop
lowerCamelCase_ = crop_size
lowerCamelCase_ = do_rescale
lowerCamelCase_ = rescale_factor
lowerCamelCase_ = do_normalize
lowerCamelCase_ = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
lowerCamelCase_ = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def a__ ( self : Optional[Any] , A_ : np.ndarray , A_ : Dict[str, int] , A_ : PILImageResampling = PILImageResampling.BICUBIC , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : Tuple , ) -> np.ndarray:
"""simple docstring"""
lowerCamelCase_ = get_size_dict(A_ , default_to_square=A_ )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
lowerCamelCase_ = int((256 / 224) * size['shortest_edge'] )
lowerCamelCase_ = get_resize_output_image_size(A_ , size=A_ , default_to_square=A_ )
lowerCamelCase_ = {'height': output_size[0], 'width': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""" )
return resize(
A_ , size=(size_dict['height'], size_dict['width']) , resample=A_ , data_format=A_ , **A_ )
def a__ ( self : Any , A_ : np.ndarray , A_ : Dict[str, int] , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : Any , ) -> np.ndarray:
"""simple docstring"""
lowerCamelCase_ = get_size_dict(A_ )
if "height" not in size or "width" not in size:
raise ValueError(f"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(A_ , size=(size['height'], size['width']) , data_format=A_ , **A_ )
def a__ ( self : Optional[Any] , A_ : np.ndarray , A_ : Union[int, float] , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : Optional[int] , ) -> np.ndarray:
"""simple docstring"""
return rescale(A_ , scale=A_ , data_format=A_ , **A_ )
def a__ ( self : List[str] , A_ : np.ndarray , A_ : Union[float, List[float]] , A_ : Union[float, List[float]] , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : str , ) -> np.ndarray:
"""simple docstring"""
return normalize(A_ , mean=A_ , std=A_ , data_format=A_ , **A_ )
def a__ ( self : Optional[int] , A_ : ImageInput , A_ : Optional[bool] = None , A_ : Optional[Dict[str, int]] = None , A_ : PILImageResampling = None , A_ : Optional[bool] = None , A_ : Optional[Dict[str, int]] = None , A_ : Optional[bool] = None , A_ : Optional[float] = None , A_ : Optional[bool] = None , A_ : Optional[Union[float, Iterable[float]]] = None , A_ : Optional[Union[float, Iterable[float]]] = None , A_ : Optional[TensorType] = None , A_ : ChannelDimension = ChannelDimension.FIRST , **A_ : List[Any] , ) -> BatchFeature:
"""simple docstring"""
lowerCamelCase_ = do_resize if do_resize is not None else self.do_resize
lowerCamelCase_ = resample if resample is not None else self.resample
lowerCamelCase_ = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCamelCase_ = do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCamelCase_ = do_normalize if do_normalize is not None else self.do_normalize
lowerCamelCase_ = image_mean if image_mean is not None else self.image_mean
lowerCamelCase_ = image_std if image_std is not None else self.image_std
lowerCamelCase_ = size if size is not None else self.size
lowerCamelCase_ = get_size_dict(A_ , default_to_square=A_ )
lowerCamelCase_ = crop_size if crop_size is not None else self.crop_size
lowerCamelCase_ = get_size_dict(A_ , param_name='crop_size' )
lowerCamelCase_ = make_list_of_images(A_ )
if not valid_images(A_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
lowerCamelCase_ = [to_numpy_array(A_ ) for image in images]
if do_resize:
lowerCamelCase_ = [self.resize(A_ , A_ , A_ ) for image in images]
if do_center_crop:
lowerCamelCase_ = [self.center_crop(A_ , A_ ) for image in images]
if do_rescale:
lowerCamelCase_ = [self.rescale(A_ , A_ ) for image in images]
if do_normalize:
lowerCamelCase_ = [self.normalize(A_ , A_ , A_ ) for image in images]
lowerCamelCase_ = [to_channel_dimension_format(A_ , A_ ) for image in images]
lowerCamelCase_ = {'pixel_values': images}
return BatchFeature(data=A_ , tensor_type=A_ )
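# A self-contained sketch (detached from the masked class above) of the
# rescale -> normalize arithmetic the preprocessing pipeline applies per image;
# the statistics below are the usual ImageNet defaults.
import numpy as np

image = np.random.randint(0, 256, size=(224, 224, 3)).astype(np.float32)
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
rescaled = image * (1 / 255)          # do_rescale with rescale_factor = 1/255
normalized = (rescaled - mean) / std  # do_normalize, broadcast over channels
assert normalized.shape == (224, 224, 3)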
def _SCREAMING_SNAKE_CASE ( lowercase : list[int] , lowercase : list[int] ):
'''simple docstring'''
lowerCamelCase_ = len(lowercase )
print('The following activities are selected:' )
# The first activity is always selected
lowerCamelCase_ = 0
print(lowercase , end=',' )
# Consider rest of the activities
for j in range(lowercase ):
# If this activity has start time greater than
# or equal to the finish time of previously
# selected activity, then select it
if start[j] >= finish[i]:
print(lowercase , end=',' )
lowerCamelCase_ = j
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase : Tuple = [1, 3, 0, 5, 8, 5]
lowerCamelCase : int = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
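# Expected behaviour sketch: with the start/finish lists above, the greedy pass
# selects indices 0, 1, 3, 4: each chosen activity starts no earlier than the
# previous one finishes (0 ends at 2 and 1 starts at 3; 1 ends at 4 and 3
# starts at 5; 3 ends at 7 and 4 starts at 8). The greedy argument assumes
# activities are sorted by finish time, as they are here.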
import cva
import numpy as np
class A:
'''simple docstring'''
def __init__( self : int , A_ : float , A_ : int ) -> List[Any]:
"""simple docstring"""
if k in (0.04, 0.06):
lowerCamelCase_ = k
lowerCamelCase_ = window_size
else:
raise ValueError('invalid k value' )
def __str__( self : str ) -> str:
"""simple docstring"""
return str(self.k )
def a__ ( self : Any , A_ : str ) -> tuple[cva.Mat, list[list[int]]]:
"""simple docstring"""
lowerCamelCase_ = cva.imread(A_ , 0 )
lowerCamelCase_ , lowerCamelCase_ = img.shape
lowerCamelCase_ = []
lowerCamelCase_ = img.copy()
lowerCamelCase_ = cva.cvtColor(A_ , cva.COLOR_GRAY2RGB )
lowerCamelCase_ , lowerCamelCase_ = np.gradient(A_ )
lowerCamelCase_ = dx**2
lowerCamelCase_ = dy**2
lowerCamelCase_ = dx * dy
lowerCamelCase_ = 0.04
lowerCamelCase_ = self.window_size // 2
for y in range(A_ , h - offset ):
for x in range(A_ , w - offset ):
lowerCamelCase_ = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowerCamelCase_ = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowerCamelCase_ = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowerCamelCase_ = (wxx * wyy) - (wxy**2)
lowerCamelCase_ = wxx + wyy
lowerCamelCase_ = det - k * (trace**2)
# Corner-response threshold below; raise it for fewer, stronger corners
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 255 )
return color_img, corner_list
if __name__ == "__main__":
lowerCamelCase : Optional[int] = HarrisCorner(0.04, 3)
lowerCamelCase , lowerCamelCase : Optional[int] = edge_detect.detect("path_to_image")
cva.imwrite("detect.png", color_img)
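# A worked sketch of the Harris response computed above, detached from the
# masked class: R = det(M) - k * trace(M)^2 for the windowed structure tensor
# M = [[Ixx, Ixy], [Ixy, Iyy]]. The window sums below are hypothetical.
wxx, wyy, wxy = 9.0, 9.0, 1.0
k = 0.04
r = (wxx * wyy - wxy**2) - k * (wxx + wyy) ** 2
assert round(r, 2) == 67.04  # a strong positive response indicates a corner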
from jiwer import compute_measures
import datasets
lowerCamelCase : List[Any] = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
lowerCamelCase : Union[str, Any] = "\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n"
lowerCamelCase : Union[str, Any] = "\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> wer = datasets.load_metric(\"wer\")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A( datasets.Metric ):
'''simple docstring'''
def a__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/jitsi/jiwer/'] , reference_urls=[
'https://en.wikipedia.org/wiki/Word_error_rate',
] , )
def a__ ( self : int , A_ : List[str]=None , A_ : Union[str, Any]=None , A_ : Dict=False ) -> Any:
"""simple docstring"""
if concatenate_texts:
return compute_measures(A_ , A_ )["wer"]
else:
lowerCamelCase_ = 0
lowerCamelCase_ = 0
for prediction, reference in zip(A_ , A_ ):
lowerCamelCase_ = compute_measures(A_ , A_ )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
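# A minimal sketch (assuming `jiwer` is installed) mirroring the iterative
# branch above: accumulate substitutions, deletions and insertions over each
# pair, then divide by the reference length.
from jiwer import compute_measures

measures = compute_measures("this is the reference", "this is the prediction")
errors = measures["substitutions"] + measures["deletions"] + measures["insertions"]
total = measures["substitutions"] + measures["deletions"] + measures["hits"]
print(errors / total)  # one substitution over four reference words -> 0.25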
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCamelCase : str = logging.get_logger(__name__)
lowerCamelCase : Optional[Any] = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
lowerCamelCase : int = {
"vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
"merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
"tokenizer_config_file": {
"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
},
}
lowerCamelCase : Tuple = {"facebook/blenderbot-3B": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = (
list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
)
lowerCamelCase_ = bs[:]
lowerCamelCase_ = 0
for b in range(2**8 ):
if b not in bs:
bs.append(lowercase )
cs.append(2**8 + n )
n += 1
lowerCamelCase_ = [chr(lowercase ) for n in cs]
return dict(zip(lowercase , lowercase ) )
def _SCREAMING_SNAKE_CASE ( lowercase : int ):
'''simple docstring'''
lowerCamelCase_ = set()
lowerCamelCase_ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowerCamelCase_ = char
return pairs
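# A quick illustration (a sketch, not part of the tokenizer): get_pairs collects
# the adjacent symbol pairs that BPE ranks against the merge table. For the
# symbol tuple ('h', 'e', 'l', 'l', 'o') it returns
# {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')}.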
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self : Optional[Any] , A_ : List[Any] , A_ : List[Any] , A_ : Union[str, Any]="replace" , A_ : Dict="<s>" , A_ : Optional[int]="</s>" , A_ : Optional[Any]="</s>" , A_ : Dict="<s>" , A_ : Dict="<unk>" , A_ : Any="<pad>" , A_ : Dict="<mask>" , A_ : Union[str, Any]=False , **A_ : List[str] , ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else bos_token
lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else eos_token
lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else sep_token
lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else cls_token
lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else unk_token
lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token
super().__init__(
errors=A_ , bos_token=A_ , eos_token=A_ , unk_token=A_ , sep_token=A_ , cls_token=A_ , pad_token=A_ , mask_token=A_ , add_prefix_space=A_ , **A_ , )
with open(A_ , encoding='utf-8' ) as vocab_handle:
lowerCamelCase_ = json.load(A_ )
lowerCamelCase_ = {v: k for k, v in self.encoder.items()}
lowerCamelCase_ = errors # how to handle errors in decoding
lowerCamelCase_ = bytes_to_unicode()
lowerCamelCase_ = {v: k for k, v in self.byte_encoder.items()}
with open(A_ , encoding='utf-8' ) as merges_handle:
lowerCamelCase_ = merges_handle.read().split('\n' )[1:-1]
lowerCamelCase_ = [tuple(merge.split() ) for merge in bpe_merges]
lowerCamelCase_ = dict(zip(A_ , range(len(A_ ) ) ) )
lowerCamelCase_ = {}
lowerCamelCase_ = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowerCamelCase_ = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def a__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
return len(self.encoder )
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def a__ ( self : Tuple , A_ : Tuple ) -> Optional[Any]:
"""simple docstring"""
if token in self.cache:
return self.cache[token]
lowerCamelCase_ = tuple(A_ )
lowerCamelCase_ = get_pairs(A_ )
if not pairs:
return token
while True:
lowerCamelCase_ = min(A_ , key=lambda A_ : self.bpe_ranks.get(A_ , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
lowerCamelCase_ , lowerCamelCase_ = bigram
lowerCamelCase_ = []
lowerCamelCase_ = 0
while i < len(A_ ):
try:
lowerCamelCase_ = word.index(A_ , A_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCamelCase_ = j
if word[i] == first and i < len(A_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCamelCase_ = tuple(A_ )
lowerCamelCase_ = new_word
if len(A_ ) == 1:
break
else:
lowerCamelCase_ = get_pairs(A_ )
lowerCamelCase_ = ' '.join(A_ )
lowerCamelCase_ = word
return word
def a__ ( self : str , A_ : List[str] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = []
for token in re.findall(self.pat , A_ ):
lowerCamelCase_ = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(A_ ).split(' ' ) )
return bpe_tokens
def a__ ( self : Tuple , A_ : str ) -> Optional[Any]:
"""simple docstring"""
return self.encoder.get(A_ , self.encoder.get(self.unk_token ) )
def a__ ( self : Tuple , A_ : Dict ) -> List[Any]:
"""simple docstring"""
return self.decoder.get(A_ )
def a__ ( self : Optional[int] , A_ : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = ''.join(A_ )
lowerCamelCase_ = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
return text
def a__ ( self : Tuple , A_ : str , A_ : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(A_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCamelCase_ = os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
lowerCamelCase_ = os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(A_ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=A_ , ensure_ascii=A_ ) + '\n' )
lowerCamelCase_ = 0
with open(A_ , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda A_ : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
' Please check that the tokenizer is not corrupted!' )
lowerCamelCase_ = token_index
writer.write(' '.join(A_ ) + '\n' )
index += 1
return vocab_file, merge_file
def a__ ( self : str , A_ : List[int] , A_ : Optional[List[int]] = None , A_ : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ )
if token_ids_a is None:
return [1] + ([0] * len(A_ )) + [1]
return [1] + ([0] * len(A_ )) + [1, 1] + ([0] * len(A_ )) + [1]
def a__ ( self : int , A_ : List[int] , A_ : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def a__ ( self : str , A_ : Optional[Any] , A_ : Union[str, Any]=False , **A_ : List[str] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = kwargs.pop('add_prefix_space' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(A_ ) > 0 and not text[0].isspace()):
lowerCamelCase_ = ' ' + text
return (text, kwargs)
def a__ ( self : List[Any] , A_ : List[int] , A_ : Optional[List[int]] = None ) -> Dict:
"""simple docstring"""
return token_ids_a + [self.eos_token_id]
def a__ ( self : Optional[int] , A_ : "Conversation" ) -> List[int]:
"""simple docstring"""
lowerCamelCase_ = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need a space prefix here, matching what Blenderbot does internally
inputs.append(' ' + text )
else:
# Generated responses already carry the space prefix.
inputs.append(A_ )
lowerCamelCase_ = ' '.join(A_ )
lowerCamelCase_ = self.encode(A_ )
if len(A_ ) > self.model_max_length:
lowerCamelCase_ = input_ids[-self.model_max_length :]
logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" )
return input_ids
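# A hedged usage sketch: in the unmasked transformers source this class is
# BlenderbotTokenizer, so loading the published checkpoint exercises the same
# BPE and special-token logic implemented above (network access assumed).
from transformers import BlenderbotTokenizer

tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
ids = tokenizer("Hello world")["input_ids"]
print(tokenizer.decode(ids))  # the text round-trips with a trailing </s>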
from __future__ import annotations
import bisect
def _SCREAMING_SNAKE_CASE ( lowercase : list[int] , lowercase : int , lowercase : int = 0 , lowercase : int = -1 ):
'''simple docstring'''
if hi < 0:
lowerCamelCase_ = len(lowercase )
while lo < hi:
lowerCamelCase_ = lo + (hi - lo) // 2
if sorted_collection[mid] < item:
lowerCamelCase_ = mid + 1
else:
lowerCamelCase_ = mid
return lo
def _SCREAMING_SNAKE_CASE ( lowercase : list[int] , lowercase : int , lowercase : int = 0 , lowercase : int = -1 ):
'''simple docstring'''
if hi < 0:
lowerCamelCase_ = len(lowercase )
while lo < hi:
lowerCamelCase_ = lo + (hi - lo) // 2
if sorted_collection[mid] <= item:
lowerCamelCase_ = mid + 1
else:
lowerCamelCase_ = mid
return lo
def _SCREAMING_SNAKE_CASE ( lowercase : list[int] , lowercase : int , lowercase : int = 0 , lowercase : int = -1 ):
'''simple docstring'''
sorted_collection.insert(bisect_left(lowercase , lowercase , lowercase , lowercase ) , lowercase )
def _SCREAMING_SNAKE_CASE ( lowercase : list[int] , lowercase : int , lowercase : int = 0 , lowercase : int = -1 ):
'''simple docstring'''
sorted_collection.insert(bisect_right(lowercase , lowercase , lowercase , lowercase ) , lowercase )
def _SCREAMING_SNAKE_CASE ( lowercase : list[int] , lowercase : int ):
'''simple docstring'''
lowerCamelCase_ = 0
lowerCamelCase_ = len(lowercase ) - 1
while left <= right:
lowerCamelCase_ = left + (right - left) // 2
lowerCamelCase_ = sorted_collection[midpoint]
if current_item == item:
return midpoint
elif item < current_item:
lowerCamelCase_ = midpoint - 1
else:
lowerCamelCase_ = midpoint + 1
return None
def _SCREAMING_SNAKE_CASE ( lowercase : list[int] , lowercase : int ):
'''simple docstring'''
lowerCamelCase_ = bisect.bisect_left(lowercase , lowercase )
if index != len(lowercase ) and sorted_collection[index] == item:
return index
return None
def _SCREAMING_SNAKE_CASE ( lowercase : list[int] , lowercase : int , lowercase : int , lowercase : int ):
'''simple docstring'''
if right < left:
return None
lowerCamelCase_ = left + (right - left) // 2
if sorted_collection[midpoint] == item:
return midpoint
elif sorted_collection[midpoint] > item:
return binary_search_by_recursion(lowercase , lowercase , lowercase , midpoint - 1 )
else:
return binary_search_by_recursion(lowercase , lowercase , midpoint + 1 , lowercase )
if __name__ == "__main__":
lowerCamelCase : Dict = input("Enter numbers separated by comma:\n").strip()
lowerCamelCase : Tuple = sorted(int(item) for item in user_input.split(","))
lowerCamelCase : int = int(input("Enter a single number to be found in the list:\n"))
lowerCamelCase : List[str] = binary_search(collection, target)
if result is None:
print(F"""{target} was not found in {collection}.""")
else:
print(F"""{target} was found at position {result} in {collection}.""")
lowerCamelCase : Dict = "Alexander Joslin"
import operator as op
from .stack import Stack
def _SCREAMING_SNAKE_CASE ( lowercase : str ):
'''simple docstring'''
lowerCamelCase_ = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub}
lowerCamelCase_ = Stack()
lowerCamelCase_ = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(lowercase ) )
elif i in operators:
# RULE 2
operator_stack.push(lowercase )
elif i == ")":
# RULE 4
lowerCamelCase_ = operator_stack.peek()
operator_stack.pop()
lowerCamelCase_ = operand_stack.peek()
operand_stack.pop()
lowerCamelCase_ = operand_stack.peek()
operand_stack.pop()
lowerCamelCase_ = operators[opr](lowercase , lowercase )
operand_stack.push(lowercase )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
lowerCamelCase : Any = "(5 + ((4 * 2) * (2 + 3)))"
# answer = 45
print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class A( unittest.TestCase ):
'''simple docstring'''
def a__ ( self : Any ) -> Dict:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def a__ ( self : List[Any] ) -> int:
"""simple docstring"""
lowerCamelCase_ = 1
lowerCamelCase_ = 3
lowerCamelCase_ = (32, 32)
lowerCamelCase_ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(A_ )
return image
@property
def a__ ( self : Optional[Any] ) -> str:
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
return model
@property
def a__ ( self : List[str] ) -> Dict:
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
return model
@property
def a__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(A_ )
@property
def a__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
def extract(*A_ : List[Any] , **A_ : Optional[int] ):
class A:
'''simple docstring'''
def __init__( self : Tuple ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = torch.ones([0] )
def a__ ( self : int , A_ : List[Any] ) -> Tuple:
"""simple docstring"""
self.pixel_values.to(A_ )
return self
return Out()
return extract
def a__ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ = self.dummy_cond_unet
lowerCamelCase_ = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=A_ , set_alpha_to_one=A_ , )
lowerCamelCase_ = self.dummy_vae
lowerCamelCase_ = self.dummy_text_encoder
lowerCamelCase_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
# deterministic DDIM scheduler for this test (no PRK steps involved)
lowerCamelCase_ = StableDiffusionPipeline(
unet=A_ , scheduler=A_ , vae=A_ , text_encoder=A_ , tokenizer=A_ , safety_checker=A_ , feature_extractor=self.dummy_extractor , )
lowerCamelCase_ = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
lowerCamelCase_ = 'A painting of a squirrel eating a burger'
lowerCamelCase_ = torch.Generator(device=A_ ).manual_seed(0 )
lowerCamelCase_ = sd_pipe([prompt] , generator=A_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' )
lowerCamelCase_ = output.images
lowerCamelCase_ = torch.Generator(device=A_ ).manual_seed(0 )
lowerCamelCase_ = sd_pipe(
[prompt] , generator=A_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=A_ , )[0]
lowerCamelCase_ = image[0, -3:, -3:, -1]
lowerCamelCase_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase_ = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def a__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ = self.dummy_cond_unet
lowerCamelCase_ = PNDMScheduler(skip_prk_steps=A_ )
lowerCamelCase_ = self.dummy_vae
lowerCamelCase_ = self.dummy_text_encoder
lowerCamelCase_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
# make sure here that pndm scheduler skips prk
lowerCamelCase_ = StableDiffusionPipeline(
unet=A_ , scheduler=A_ , vae=A_ , text_encoder=A_ , tokenizer=A_ , safety_checker=A_ , feature_extractor=self.dummy_extractor , )
lowerCamelCase_ = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
lowerCamelCase_ = 'A painting of a squirrel eating a burger'
lowerCamelCase_ = torch.Generator(device=A_ ).manual_seed(0 )
lowerCamelCase_ = sd_pipe([prompt] , generator=A_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' )
lowerCamelCase_ = output.images
lowerCamelCase_ = torch.Generator(device=A_ ).manual_seed(0 )
lowerCamelCase_ = sd_pipe(
[prompt] , generator=A_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=A_ , )[0]
lowerCamelCase_ = image[0, -3:, -3:, -1]
lowerCamelCase_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase_ = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def a__ ( self : str ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = StableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-lms-pipe' , safety_checker=A_ )
assert isinstance(A_ , A_ )
assert isinstance(pipe.scheduler , A_ )
assert pipe.safety_checker is None
lowerCamelCase_ = pipe('example prompt' , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(A_ )
lowerCamelCase_ = StableDiffusionPipeline.from_pretrained(A_ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
lowerCamelCase_ = pipe('example prompt' , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def a__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = self.dummy_cond_unet
lowerCamelCase_ = PNDMScheduler(skip_prk_steps=A_ )
lowerCamelCase_ = self.dummy_vae
lowerCamelCase_ = self.dummy_text_encoder
lowerCamelCase_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
# put models in fp16
lowerCamelCase_ = unet.half()
lowerCamelCase_ = vae.half()
lowerCamelCase_ = bert.half()
# make sure here that pndm scheduler skips prk
lowerCamelCase_ = StableDiffusionPipeline(
unet=A_ , scheduler=A_ , vae=A_ , text_encoder=A_ , tokenizer=A_ , safety_checker=A_ , feature_extractor=self.dummy_extractor , )
lowerCamelCase_ = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
lowerCamelCase_ = 'A painting of a squirrel eating a burger'
lowerCamelCase_ = sd_pipe([prompt] , num_inference_steps=2 , output_type='np' ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class A( unittest.TestCase ):
'''simple docstring'''
def a__ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=A_ )
lowerCamelCase_ = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
lowerCamelCase_ = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
lowerCamelCase_ = (
'portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'
' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'
' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'
' children from bahnhof zoo, detailed '
)
lowerCamelCase_ = 4003660346
lowerCamelCase_ = 7
# without safety guidance (sld_guidance_scale = 0)
lowerCamelCase_ = torch.manual_seed(A_ )
lowerCamelCase_ = sd_pipe(
[prompt] , generator=A_ , guidance_scale=A_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , )
lowerCamelCase_ = output.images
lowerCamelCase_ = image[0, -3:, -3:, -1]
lowerCamelCase_ = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
# with safety guidance (strong configuration)
lowerCamelCase_ = torch.manual_seed(A_ )
lowerCamelCase_ = sd_pipe(
[prompt] , generator=A_ , guidance_scale=A_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowerCamelCase_ = output.images
lowerCamelCase_ = image[0, -3:, -3:, -1]
lowerCamelCase_ = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def a__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
lowerCamelCase_ = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=A_ )
lowerCamelCase_ = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
lowerCamelCase_ = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
lowerCamelCase_ = 'padme amidala taking a bath artwork, safe for work, no nudity'
lowerCamelCase_ = 2734971755
lowerCamelCase_ = 7
lowerCamelCase_ = torch.manual_seed(A_ )
lowerCamelCase_ = sd_pipe(
[prompt] , generator=A_ , guidance_scale=A_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , )
lowerCamelCase_ = output.images
lowerCamelCase_ = image[0, -3:, -3:, -1]
lowerCamelCase_ = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
lowerCamelCase_ = torch.manual_seed(A_ )
lowerCamelCase_ = sd_pipe(
[prompt] , generator=A_ , guidance_scale=A_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowerCamelCase_ = output.images
lowerCamelCase_ = image[0, -3:, -3:, -1]
lowerCamelCase_ = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def a__ ( self : Any ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' )
lowerCamelCase_ = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
lowerCamelCase_ = (
'the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'
' leyendecker'
)
lowerCamelCase_ = 1044355234
lowerCamelCase_ = 12
lowerCamelCase_ = torch.manual_seed(A_ )
lowerCamelCase_ = sd_pipe(
[prompt] , generator=A_ , guidance_scale=A_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , )
lowerCamelCase_ = output.images
lowerCamelCase_ = image[0, -3:, -3:, -1]
lowerCamelCase_ = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
lowerCamelCase_ = torch.manual_seed(A_ )
lowerCamelCase_ = sd_pipe(
[prompt] , generator=A_ , guidance_scale=A_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowerCamelCase_ = output.images
lowerCamelCase_ = image[0, -3:, -3:, -1]
lowerCamelCase_ = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
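# A hedged usage sketch of the safe pipeline the tests above exercise; the GPU,
# network access, and fp16 choice are assumptions for illustration.
import torch
from diffusers import StableDiffusionPipelineSafe

pipe = StableDiffusionPipelineSafe.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")
image = pipe("portrait photo of an astronaut", sld_guidance_scale=2000).images[0]
image.save("astronaut.png")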
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
lowerCamelCase : List[Any] = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
"text-classification",
"language-modeling",
"summarization",
"token-classification",
"question-answering",
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
lowerCamelCase : Tuple = logging.getLogger()
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = argparse.ArgumentParser()
parser.add_argument('-f' )
lowerCamelCase_ = parser.parse_args()
return args.f
def _SCREAMING_SNAKE_CASE ( lowercase : Optional[Any] , lowercase : Dict="eval" ):
'''simple docstring'''
lowerCamelCase_ = os.path.join(lowercase , f"""{split}_results.json""" )
if os.path.exists(lowercase ):
with open(lowercase , 'r' ) as f:
return json.load(lowercase )
raise ValueError(f"""can't find {path}""" )
lowerCamelCase : str = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class A( UpperCamelCase ):
'''simple docstring'''
def a__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = self.get_auto_remove_tmp_dir()
lowerCamelCase_ = f"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
with patch.object(A_ , 'argv' , A_ ):
run_flax_glue.main()
lowerCamelCase_ = get_results(A_ )
self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )
@slow
def a__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = self.get_auto_remove_tmp_dir()
lowerCamelCase_ = f"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(A_ , 'argv' , A_ ):
run_clm_flax.main()
lowerCamelCase_ = get_results(A_ )
self.assertLess(result['eval_perplexity'] , 100 )
@slow
def a__ ( self : str ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = self.get_auto_remove_tmp_dir()
lowerCamelCase_ = f"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
with patch.object(A_ , 'argv' , A_ ):
run_summarization_flax.main()
lowerCamelCase_ = get_results(A_ , split='test' )
self.assertGreaterEqual(result['test_rouge1'] , 10 )
self.assertGreaterEqual(result['test_rouge2'] , 2 )
self.assertGreaterEqual(result['test_rougeL'] , 7 )
self.assertGreaterEqual(result['test_rougeLsum'] , 7 )
@slow
def a__ ( self : Optional[int] ) -> str:
"""simple docstring"""
lowerCamelCase_ = self.get_auto_remove_tmp_dir()
lowerCamelCase_ = f"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
with patch.object(A_ , 'argv' , A_ ):
run_mlm_flax.main()
lowerCamelCase_ = get_results(A_ )
self.assertLess(result['eval_perplexity'] , 42 )
@slow
def a__ ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = self.get_auto_remove_tmp_dir()
lowerCamelCase_ = f"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(A_ , 'argv' , A_ ):
run_ta_mlm_flax.main()
lowerCamelCase_ = get_results(A_ )
self.assertGreaterEqual(result['eval_accuracy'] , 0.42 )
@slow
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = 7 if get_gpu_count() > 1 else 2
lowerCamelCase_ = self.get_auto_remove_tmp_dir()
lowerCamelCase_ = f"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
with patch.object(A_ , 'argv' , A_ ):
run_flax_ner.main()
lowerCamelCase_ = get_results(A_ )
self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )
self.assertGreaterEqual(result['eval_f1'] , 0.3 )
@slow
def a__ ( self : str ) -> int:
"""simple docstring"""
lowerCamelCase_ = self.get_auto_remove_tmp_dir()
lowerCamelCase_ = f"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
with patch.object(A_ , 'argv' , A_ ):
run_qa.main()
lowerCamelCase_ = get_results(A_ )
self.assertGreaterEqual(result['eval_f1'] , 30 )
self.assertGreaterEqual(result['eval_exact'] , 30 )
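# The pattern the tests above rely on, shown in isolation (a sketch): swapping
# sys.argv temporarily lets an example script's argparse-driven main() run
# in-process with synthetic command-line arguments.
import sys
from unittest.mock import patch

def main():
    print(sys.argv[1:])

with patch.object(sys, "argv", ["prog", "--epochs", "2"]):
    main()  # prints ['--epochs', '2']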
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A:
'''simple docstring'''
def __init__( self : Optional[Any] , A_ : Union[str, Any] , A_ : str=13 , A_ : List[Any]=32 , A_ : Tuple=2 , A_ : Dict=3 , A_ : Union[str, Any]=16 , A_ : List[str]=[32, 64, 128] , A_ : Optional[Any]=[1, 2, 1] , A_ : Tuple=[2, 2, 4] , A_ : Dict=2 , A_ : Optional[Any]=2.0 , A_ : List[str]=True , A_ : Dict=0.0 , A_ : List[str]=0.0 , A_ : Optional[int]=0.1 , A_ : str="gelu" , A_ : Optional[Any]=False , A_ : Any=True , A_ : Optional[Any]=0.02 , A_ : Dict=1E-5 , A_ : int=True , A_ : Optional[int]=None , A_ : List[str]=True , A_ : Tuple=10 , A_ : Any=8 , A_ : Dict=["stage1", "stage2"] , A_ : Optional[Any]=[1, 2] , ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = image_size
lowerCamelCase_ = patch_size
lowerCamelCase_ = num_channels
lowerCamelCase_ = embed_dim
lowerCamelCase_ = hidden_sizes
lowerCamelCase_ = depths
lowerCamelCase_ = num_heads
lowerCamelCase_ = window_size
lowerCamelCase_ = mlp_ratio
lowerCamelCase_ = qkv_bias
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = drop_path_rate
lowerCamelCase_ = hidden_act
lowerCamelCase_ = use_absolute_embeddings
lowerCamelCase_ = patch_norm
lowerCamelCase_ = layer_norm_eps
lowerCamelCase_ = initializer_range
lowerCamelCase_ = is_training
lowerCamelCase_ = scope
lowerCamelCase_ = use_labels
lowerCamelCase_ = type_sequence_label_size
lowerCamelCase_ = encoder_stride
lowerCamelCase_ = out_features
lowerCamelCase_ = out_indices
def a__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = self.get_config()
return config, pixel_values, labels
def a__ ( self : List[Any] ) -> Any:
"""simple docstring"""
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def a__ ( self : Union[str, Any] , A_ : Dict , A_ : int , A_ : Optional[int] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = FocalNetModel(config=A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = model(A_ )
lowerCamelCase_ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowerCamelCase_ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def a__ ( self : Tuple , A_ : List[str] , A_ : Optional[int] , A_ : Optional[Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = FocalNetBackbone(config=A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = model(A_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
lowerCamelCase_ = None
lowerCamelCase_ = FocalNetBackbone(config=A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = model(A_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def a__ ( self : int , A_ : Optional[Any] , A_ : Optional[int] , A_ : Any ) -> Any:
"""simple docstring"""
lowerCamelCase_ = FocalNetForMaskedImageModeling(config=A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = model(A_ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCamelCase_ = 1
lowerCamelCase_ = FocalNetForMaskedImageModeling(A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase_ = model(A_ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def a__ ( self : Tuple , A_ : List[Any] , A_ : int , A_ : Dict ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = self.type_sequence_label_size
lowerCamelCase_ = FocalNetForImageClassification(A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = model(A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCamelCase_ = 1
lowerCamelCase_ = FocalNetForImageClassification(A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase_ = model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a__ ( self : int ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ = self.prepare_config_and_inputs()
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = config_and_inputs
lowerCamelCase_ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class A( UpperCamelCase , UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
UpperCamelCase = (
{'''feature-extraction''': FocalNetModel, '''image-classification''': FocalNetForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = FocalNetModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=A_ , embed_dim=37 , has_text_modality=A_ )
def a__ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__ ( self : Any ) -> Optional[int]:
"""simple docstring"""
return
def a__ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def a__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*A_ )
def a__ ( self : Dict ) -> int:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*A_ )
def a__ ( self : List[str] ) -> Any:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_ )
@unittest.skip(reason='FocalNet does not use inputs_embeds' )
def a__ ( self : int ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason='FocalNet does not use feedforward chunking' )
def a__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
pass
def a__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowerCamelCase_ = model_class(A_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A_ , nn.Linear ) )
def a__ ( self : Any ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowerCamelCase_ = model_class(A_ )
lowerCamelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase_ = [*signature.parameters.keys()]
lowerCamelCase_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , A_ )
def a__ ( self : int , A_ : List[Any] , A_ : int , A_ : Dict , A_ : Dict ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = model_class(A_ )
model.to(A_ )
model.eval()
with torch.no_grad():
lowerCamelCase_ = model(**self._prepare_for_class(A_ , A_ ) )
lowerCamelCase_ = outputs.hidden_states
lowerCamelCase_ = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(A_ ) , A_ )
# FocalNet has a different seq_length
lowerCamelCase_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCamelCase_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
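# reshaped_hidden_states are (batch, channels, height, width); flatten the spatial dims
# and permute back to (batch, seq_len, channels) to compare against hidden_states.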
lowerCamelCase_ = outputs.reshaped_hidden_states
self.assertEqual(len(A_ ) , A_ )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = reshaped_hidden_states[0].shape
lowerCamelCase_ = (
reshaped_hidden_states[0].view(A_ , A_ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def a__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
lowerCamelCase_ = True
self.check_hidden_states_output(A_ , A_ , A_ , A_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase_ = True
self.check_hidden_states_output(A_ , A_ , A_ , A_ )
def a__ ( self : List[str] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = 3
lowerCamelCase_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowerCamelCase_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCamelCase_ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowerCamelCase_ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
lowerCamelCase_ = True
self.check_hidden_states_output(A_ , A_ , A_ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase_ = True
self.check_hidden_states_output(A_ , A_ , A_ , (padded_height, padded_width) )
@slow
def a__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = FocalNetModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def a__ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = _config_zero_init(A_ )
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(config=A_ )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class A( unittest.TestCase ):
'''simple docstring'''
@cached_property
def a__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
return AutoImageProcessor.from_pretrained('microsoft/focalnet-tiny' ) if is_vision_available() else None
@slow
def a__ ( self : Tuple ) -> Any:
"""simple docstring"""
lowerCamelCase_ = FocalNetForImageClassification.from_pretrained('microsoft/focalnet-tiny' ).to(A_ )
lowerCamelCase_ = self.default_image_processor
lowerCamelCase_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
lowerCamelCase_ = image_processor(images=A_ , return_tensors='pt' ).to(A_ )
# forward pass
with torch.no_grad():
lowerCamelCase_ = model(**A_ )
# verify the logits
lowerCamelCase_ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , A_ )
lowerCamelCase_ = torch.tensor([0.2166, -0.4368, 0.2191] ).to(A_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A_ , atol=1E-4 ) )
self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class A( UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (FocalNetBackbone,) if is_torch_available() else ()
UpperCamelCase = FocalNetConfig
UpperCamelCase = False
def a__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = FocalNetModelTester(self )
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class A( unittest.TestCase ):
'''simple docstring'''
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
super().tearDown()
gc.collect()
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = FlaxStableDiffusionPipeline.from_pretrained(
'stabilityai/stable-diffusion-2' , revision='bf16' , dtype=jnp.bfloat16 , )
lowerCamelCase_ = 'A painting of a squirrel eating a burger'
lowerCamelCase_ = jax.device_count()
lowerCamelCase_ = num_samples * [prompt]
lowerCamelCase_ = sd_pipe.prepare_inputs(A_ )
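# Replicate the params on every device and shard the prompt ids so the jitted
# pipeline runs data-parallel across devices via pmap.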
lowerCamelCase_ = replicate(A_ )
lowerCamelCase_ = shard(A_ )
lowerCamelCase_ = jax.random.PRNGKey(0 )
lowerCamelCase_ = jax.random.split(A_ , jax.device_count() )
lowerCamelCase_ = sd_pipe(A_ , A_ , A_ , num_inference_steps=25 , jit=A_ )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
lowerCamelCase_ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowerCamelCase_ = images[0, 253:256, 253:256, -1]
lowerCamelCase_ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowerCamelCase_ = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def a__ ( self : Any ) -> int:
"""simple docstring"""
lowerCamelCase_ = 'stabilityai/stable-diffusion-2'
lowerCamelCase_ , lowerCamelCase_ = FlaxDPMSolverMultistepScheduler.from_pretrained(A_ , subfolder='scheduler' )
lowerCamelCase_ , lowerCamelCase_ = FlaxStableDiffusionPipeline.from_pretrained(
A_ , scheduler=A_ , revision='bf16' , dtype=jnp.bfloat16 , )
lowerCamelCase_ = scheduler_params
lowerCamelCase_ = 'A painting of a squirrel eating a burger'
lowerCamelCase_ = jax.device_count()
lowerCamelCase_ = num_samples * [prompt]
lowerCamelCase_ = sd_pipe.prepare_inputs(A_ )
lowerCamelCase_ = replicate(A_ )
lowerCamelCase_ = shard(A_ )
lowerCamelCase_ = jax.random.PRNGKey(0 )
lowerCamelCase_ = jax.random.split(A_ , jax.device_count() )
lowerCamelCase_ = sd_pipe(A_ , A_ , A_ , num_inference_steps=25 , jit=A_ )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
lowerCamelCase_ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowerCamelCase_ = images[0, 253:256, 253:256, -1]
lowerCamelCase_ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowerCamelCase_ = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class A( unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = MODEL_FOR_CAUSAL_LM_MAPPING
UpperCamelCase = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def a__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = pipeline(task='text-generation' , model='sshleifer/tiny-ctrl' , framework='pt' )
# Using `do_sample=False` to force deterministic output
lowerCamelCase_ = text_generator('This is a test' , do_sample=A_ )
self.assertEqual(
A_ , [
{
'generated_text': (
'This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'
' oscope. FiliFili@@'
)
}
] , )
lowerCamelCase_ = text_generator(['This is a test', 'This is a second test'] )
self.assertEqual(
A_ , [
[
{
'generated_text': (
'This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'
' oscope. FiliFili@@'
)
}
],
[
{
'generated_text': (
'This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'
' oscope. oscope. FiliFili@@'
)
}
],
] , )
lowerCamelCase_ = text_generator('This is a test' , do_sample=A_ , num_return_sequences=2 , return_tensors=A_ )
self.assertEqual(
A_ , [
{'generated_token_ids': ANY(A_ )},
{'generated_token_ids': ANY(A_ )},
] , )
lowerCamelCase_ = text_generator.model.config.eos_token_id
lowerCamelCase_ = '<pad>'
lowerCamelCase_ = text_generator(
['This is a test', 'This is a second test'] , do_sample=A_ , num_return_sequences=2 , batch_size=2 , return_tensors=A_ , )
self.assertEqual(
A_ , [
[
{'generated_token_ids': ANY(A_ )},
{'generated_token_ids': ANY(A_ )},
],
[
{'generated_token_ids': ANY(A_ )},
{'generated_token_ids': ANY(A_ )},
],
] , )
@require_tf
def a__ ( self : Optional[int] ) -> str:
"""simple docstring"""
lowerCamelCase_ = pipeline(task='text-generation' , model='sshleifer/tiny-ctrl' , framework='tf' )
# Using `do_sample=False` to force deterministic output
lowerCamelCase_ = text_generator('This is a test' , do_sample=A_ )
self.assertEqual(
A_ , [
{
'generated_text': (
'This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'
' please,'
)
}
] , )
lowerCamelCase_ = text_generator(['This is a test', 'This is a second test'] , do_sample=A_ )
self.assertEqual(
A_ , [
[
{
'generated_text': (
'This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'
' please,'
)
}
],
[
{
'generated_text': (
'This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'
' Cannes 閲閲Cannes Cannes Cannes 攵 please,'
)
}
],
] , )
def a__ ( self : Optional[int] , A_ : Dict , A_ : int , A_ : List[str] ) -> str:
"""simple docstring"""
lowerCamelCase_ = TextGenerationPipeline(model=A_ , tokenizer=A_ )
return text_generator, ["This is a test", "Another test"]
def a__ ( self : Dict ) -> str:
"""simple docstring"""
lowerCamelCase_ = 'Hello I believe in'
lowerCamelCase_ = pipeline('text-generation' , model='hf-internal-testing/tiny-random-gpt2' )
lowerCamelCase_ = text_generator(A_ )
self.assertEqual(
A_ , [{'generated_text': 'Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'}] , )
lowerCamelCase_ = text_generator(A_ , stop_sequence=' fe' )
self.assertEqual(A_ , [{'generated_text': 'Hello I believe in fe'}] )
def a__ ( self : Any , A_ : Optional[Any] , A_ : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = text_generator.model
lowerCamelCase_ = text_generator.tokenizer
lowerCamelCase_ = text_generator('This is a test' )
self.assertEqual(A_ , [{'generated_text': ANY(A_ )}] )
self.assertTrue(outputs[0]['generated_text'].startswith('This is a test' ) )
lowerCamelCase_ = text_generator('This is a test' , return_full_text=A_ )
self.assertEqual(A_ , [{'generated_text': ANY(A_ )}] )
self.assertNotIn('This is a test' , outputs[0]['generated_text'] )
lowerCamelCase_ = pipeline(task='text-generation' , model=A_ , tokenizer=A_ , return_full_text=A_ )
lowerCamelCase_ = text_generator('This is a test' )
self.assertEqual(A_ , [{'generated_text': ANY(A_ )}] )
self.assertNotIn('This is a test' , outputs[0]['generated_text'] )
lowerCamelCase_ = text_generator('This is a test' , return_full_text=A_ )
self.assertEqual(A_ , [{'generated_text': ANY(A_ )}] )
self.assertTrue(outputs[0]['generated_text'].startswith('This is a test' ) )
lowerCamelCase_ = text_generator(['This is great !', 'Something else'] , num_return_sequences=2 , do_sample=A_ )
self.assertEqual(
A_ , [
[{'generated_text': ANY(A_ )}, {'generated_text': ANY(A_ )}],
[{'generated_text': ANY(A_ )}, {'generated_text': ANY(A_ )}],
] , )
if text_generator.tokenizer.pad_token is not None:
lowerCamelCase_ = text_generator(
['This is great !', 'Something else'] , num_return_sequences=2 , batch_size=2 , do_sample=A_ )
self.assertEqual(
A_ , [
[{'generated_text': ANY(A_ )}, {'generated_text': ANY(A_ )}],
[{'generated_text': ANY(A_ )}, {'generated_text': ANY(A_ )}],
] , )
with self.assertRaises(A_ ):
lowerCamelCase_ = text_generator('test' , return_full_text=A_ , return_text=A_ )
with self.assertRaises(A_ ):
lowerCamelCase_ = text_generator('test' , return_full_text=A_ , return_tensors=A_ )
with self.assertRaises(A_ ):
lowerCamelCase_ = text_generator('test' , return_text=A_ , return_tensors=A_ )
# Empty prompt is slightly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
lowerCamelCase_ = text_generator('' )
self.assertEqual(A_ , [{'generated_text': ANY(A_ )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
lowerCamelCase_ = text_generator('' )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
lowerCamelCase_ = ['RwkvForCausalLM', 'XGLMForCausalLM', 'GPTNeoXForCausalLM']
if (
tokenizer.model_max_length < 10000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator('This is a test' * 500 , max_new_tokens=20 )
lowerCamelCase_ = text_generator('This is a test' * 500 , handle_long_generation='hole' , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(A_ ):
text_generator(
'This is a test' * 500 , handle_long_generation='hole' , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def a__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
import torch
# Classic `model_kwargs`
lowerCamelCase_ = pipeline(
model='hf-internal-testing/tiny-random-bloom' , model_kwargs={'device_map': 'auto', 'torch_dtype': torch.bfloat16} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloat16 )
lowerCamelCase_ = pipe('This is a test' )
self.assertEqual(
A_ , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent to the model as they're unlikely to mean anything else.)
lowerCamelCase_ = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' , torch_dtype=torch.bfloat16 )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloat16 )
lowerCamelCase_ = pipe('This is a test' )
self.assertEqual(
A_ , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
lowerCamelCase_ = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.float32 )
lowerCamelCase_ = pipe('This is a test' )
self.assertEqual(
A_ , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
@require_torch
@require_torch_gpu
def a__ ( self : int ) -> str:
"""simple docstring"""
import torch
lowerCamelCase_ = pipeline(model='hf-internal-testing/tiny-random-bloom' , device=0 , torch_dtype=torch.float16 )
pipe('This is a test' )
@require_torch
@require_accelerate
@require_torch_gpu
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
import torch
lowerCamelCase_ = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' , torch_dtype=torch.float16 )
pipe('This is a test' , do_sample=A_ , top_p=0.5 )
def a__ ( self : Tuple ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = 'Hello world'
lowerCamelCase_ = pipeline('text-generation' , model='hf-internal-testing/tiny-random-gpt2' )
if text_generator.model.framework == "tf":
lowerCamelCase_ = logging.get_logger('transformers.generation.tf_utils' )
else:
lowerCamelCase_ = logging.get_logger('transformers.generation.utils' )
lowerCamelCase_ = 'Both `max_new_tokens`' # The beginning of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(A_ ) as cl:
lowerCamelCase_ = text_generator(A_ , max_length=10 , max_new_tokens=1 )
self.assertIn(A_ , cl.out )
# The user only sets one -> no warning
with CaptureLogger(A_ ) as cl:
lowerCamelCase_ = text_generator(A_ , max_new_tokens=1 )
self.assertNotIn(A_ , cl.out )
with CaptureLogger(A_ ) as cl:
lowerCamelCase_ = text_generator(A_ , max_length=10 )
self.assertNotIn(A_ , cl.out )
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class A:
'''simple docstring'''
def __init__( self : int , A_ : str , A_ : Optional[Any]=13 , A_ : List[Any]=7 , A_ : int=True , A_ : Union[str, Any]=True , A_ : int=False , A_ : Optional[Any]=True , A_ : List[Any]=99 , A_ : str=32 , A_ : Union[str, Any]=5 , A_ : Optional[Any]=4 , A_ : str=37 , A_ : str="gelu" , A_ : Dict=0.1 , A_ : str=0.1 , A_ : Optional[int]=512 , A_ : Optional[int]=16 , A_ : List[Any]=2 , A_ : Dict=0.02 , A_ : Dict=3 , A_ : Union[str, Any]=4 , A_ : List[str]=None , ) -> int:
"""simple docstring"""
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = seq_length
lowerCamelCase_ = is_training
lowerCamelCase_ = use_input_mask
lowerCamelCase_ = use_token_type_ids
lowerCamelCase_ = use_labels
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = type_vocab_size
lowerCamelCase_ = type_sequence_label_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = num_labels
lowerCamelCase_ = num_choices
lowerCamelCase_ = scope
def a__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ = None
if self.use_input_mask:
lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ = None
if self.use_token_type_ids:
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A_ , initializer_range=self.initializer_range , )
def a__ ( self : Optional[Any] , A_ : Union[str, Any] , A_ : Optional[Any] , A_ : Tuple , A_ : Dict , A_ : List[str] , A_ : Union[str, Any] , A_ : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = BioGptModel(config=A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = model(A_ , attention_mask=A_ )
lowerCamelCase_ = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self : Optional[Any] , A_ : int , A_ : List[str] , A_ : Optional[Any] , A_ : Tuple , A_ : List[str] , A_ : List[Any] , A_ : Any , A_ : Dict , A_ : Optional[Any] , ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = BioGptForCausalLM(config=A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = model(A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self : Union[str, Any] , A_ : Optional[int] , A_ : Tuple , A_ : List[Any] , A_ : str , A_ : str , *A_ : List[str] ) -> str:
"""simple docstring"""
lowerCamelCase_ = BioGptModel(config=A_ )
model.to(A_ )
model.eval()
# create attention mask
lowerCamelCase_ = torch.ones(input_ids.shape , dtype=torch.long , device=A_ )
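# The second half of the sequence is meant to be masked out, so the cache path is
# exercised with partially masked context.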
lowerCamelCase_ = self.seq_length // 2
lowerCamelCase_ = 0
# first forward pass
lowerCamelCase_ , lowerCamelCase_ = model(A_ , attention_mask=A_ ).to_tuple()
# create hypothetical next token and extend next_input_ids
lowerCamelCase_ = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
lowerCamelCase_ = ids_tensor((1,) , A_ ).item() + 1
lowerCamelCase_ = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
lowerCamelCase_ = random_other_next_tokens
# append to next input_ids and attn_mask
lowerCamelCase_ = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCamelCase_ = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=A_ )] , dim=1 , )
# get two different outputs
lowerCamelCase_ = model(A_ , attention_mask=A_ )['last_hidden_state']
lowerCamelCase_ = model(A_ , past_key_values=A_ , attention_mask=A_ )['last_hidden_state']
# select random slice
lowerCamelCase_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCamelCase_ = output_from_no_past[:, -1, random_slice_idx].detach()
lowerCamelCase_ = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A_ , A_ , atol=1E-3 ) )
def a__ ( self : Dict , A_ : List[str] , A_ : Any , A_ : Optional[int] , A_ : Union[str, Any] , A_ : Tuple , *A_ : Tuple ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = BioGptModel(config=A_ ).to(A_ ).eval()
lowerCamelCase_ = torch.ones(input_ids.shape , dtype=torch.long , device=A_ )
# first forward pass
lowerCamelCase_ = model(A_ , attention_mask=A_ , use_cache=A_ )
lowerCamelCase_ , lowerCamelCase_ = outputs.to_tuple()
# create hypothetical multiple next tokens and extend next_input_ids
lowerCamelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCamelCase_ = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and attention mask
lowerCamelCase_ = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCamelCase_ = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
lowerCamelCase_ = model(A_ , attention_mask=A_ )['last_hidden_state']
lowerCamelCase_ = model(A_ , attention_mask=A_ , past_key_values=A_ )[
'last_hidden_state'
]
# select random slice
lowerCamelCase_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCamelCase_ = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCamelCase_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A_ , A_ , atol=1E-3 ) )
def a__ ( self : Union[str, Any] , A_ : List[Any] , A_ : Tuple , A_ : Optional[Any] , A_ : Union[str, Any] , A_ : List[str] , *A_ : Tuple , A_ : Tuple=False ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ = BioGptForCausalLM(A_ )
model.to(A_ )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
lowerCamelCase_ = model(A_ , labels=A_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def a__ ( self : Union[str, Any] , A_ : Tuple , *A_ : str ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = BioGptModel(A_ )
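# BioGPT uses GPT-2 style scaled initialization: 'c_proj' output projections are
# expected to have std = initializer_range / sqrt(2 * num_hidden_layers).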
lowerCamelCase_ = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def a__ ( self : int , A_ : List[Any] , A_ : Optional[int] , A_ : Any , A_ : Tuple , A_ : Tuple , *A_ : Any ) -> int:
"""simple docstring"""
lowerCamelCase_ = self.num_labels
lowerCamelCase_ = BioGptForTokenClassification(A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = model(A_ , attention_mask=A_ , token_type_ids=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
lowerCamelCase_ = self.prepare_config_and_inputs()
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = config_and_inputs
lowerCamelCase_ = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A( UpperCamelCase , UpperCamelCase , UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
UpperCamelCase = (BioGptForCausalLM,) if is_torch_available() else ()
UpperCamelCase = (
{
'''feature-extraction''': BioGptModel,
'''text-classification''': BioGptForSequenceClassification,
'''text-generation''': BioGptForCausalLM,
'''token-classification''': BioGptForTokenClassification,
'''zero-shot''': BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase = False
def a__ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = BioGptModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=A_ , hidden_size=37 )
def a__ ( self : str ) -> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def a__ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCamelCase_ = type
self.model_tester.create_and_check_model(*A_ )
def a__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*A_ )
def a__ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*A_ , gradient_checkpointing=A_ )
def a__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*A_ )
def a__ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*A_ )
def a__ ( self : str ) -> int:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*A_ )
@slow
def a__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
model.to(A_ )
lowerCamelCase_ = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
lowerCamelCase_ = 'left'
# Define PAD Token = EOS Token = 50256
lowerCamelCase_ = tokenizer.eos_token
lowerCamelCase_ = model.config.eos_token_id
# use different length sentences to test batching
lowerCamelCase_ = [
'Hello, my dog is a little',
'Today, I',
]
lowerCamelCase_ = tokenizer(A_ , return_tensors='pt' , padding=A_ )
lowerCamelCase_ = inputs['input_ids'].to(A_ )
lowerCamelCase_ = model.generate(
input_ids=A_ , attention_mask=inputs['attention_mask'].to(A_ ) , )
lowerCamelCase_ = tokenizer(sentences[0] , return_tensors='pt' ).input_ids.to(A_ )
lowerCamelCase_ = model.generate(input_ids=A_ )
lowerCamelCase_ = inputs_non_padded.shape[-1] - inputs['attention_mask'][-1].long().sum().cpu().item()
lowerCamelCase_ = tokenizer(sentences[1] , return_tensors='pt' ).input_ids.to(A_ )
lowerCamelCase_ = model.generate(input_ids=A_ , max_length=model.config.max_length - num_paddings )
lowerCamelCase_ = tokenizer.batch_decode(A_ , skip_special_tokens=A_ )
lowerCamelCase_ = tokenizer.decode(output_non_padded[0] , skip_special_tokens=A_ )
lowerCamelCase_ = tokenizer.decode(output_padded[0] , skip_special_tokens=A_ )
lowerCamelCase_ = [
'Hello, my dog is a little bit bigger than a little bit.',
'Today, I have a good idea of how to use the information',
]
self.assertListEqual(A_ , A_ )
self.assertListEqual(A_ , [non_padded_sentence, padded_sentence] )
@slow
def a__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = BioGptModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def a__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = 3
lowerCamelCase_ = input_dict['input_ids']
lowerCamelCase_ = input_ids.ne(1 ).to(A_ )
lowerCamelCase_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCamelCase_ = BioGptForSequenceClassification(A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = model(A_ , attention_mask=A_ , labels=A_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def a__ ( self : Any ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = 3
lowerCamelCase_ = 'multi_label_classification'
lowerCamelCase_ = input_dict['input_ids']
lowerCamelCase_ = input_ids.ne(1 ).to(A_ )
lowerCamelCase_ = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowerCamelCase_ = BioGptForSequenceClassification(A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = model(A_ , attention_mask=A_ , labels=A_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class A( unittest.TestCase ):
'''simple docstring'''
@slow
def a__ ( self : Any ) -> int:
"""simple docstring"""
lowerCamelCase_ = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
lowerCamelCase_ = torch.tensor([[2, 4805, 9, 656, 21]] )
lowerCamelCase_ = model(A_ )[0]
lowerCamelCase_ = 42384
lowerCamelCase_ = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , A_ )
lowerCamelCase_ = torch.tensor(
[[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , A_ , atol=1E-4 ) )
@slow
def a__ ( self : int ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
lowerCamelCase_ = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
model.to(A_ )
torch.manual_seed(0 )
lowerCamelCase_ = tokenizer('COVID-19 is' , return_tensors='pt' ).to(A_ )
lowerCamelCase_ = model.generate(
**A_ , min_length=100 , max_length=1024 , num_beams=5 , early_stopping=A_ , )
lowerCamelCase_ = tokenizer.decode(output_ids[0] , skip_special_tokens=A_ )
lowerCamelCase_ = (
'COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'
' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'
' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'
' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'
' more than 800,000 deaths.'
)
self.assertEqual(A_ , A_ )
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
lowerCamelCase : List[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
lowerCamelCase : Tuple = " \"\"\"\n Output class for the scheduler's step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n"
class A( unittest.TestCase ):
'''simple docstring'''
def a__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , 'schedulers/' ) )
lowerCamelCase_ = self.diffusers_dir
shutil.copy(
os.path.join(A_ , 'src/diffusers/schedulers/scheduling_ddpm.py' ) , os.path.join(self.diffusers_dir , 'schedulers/scheduling_ddpm.py' ) , )
def a__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = 'src/diffusers'
shutil.rmtree(self.diffusers_dir )
def a__ ( self : str , A_ : Optional[Any] , A_ : Optional[int] , A_ : str , A_ : Optional[Any]=None ) -> int:
"""simple docstring"""
lowerCamelCase_ = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
lowerCamelCase_ = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
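# Format the candidate with black so whitespace differences cannot trip the
# copy-consistency comparison.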
lowerCamelCase_ = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=119 )
lowerCamelCase_ = black.format_str(A_ , mode=A_ )
lowerCamelCase_ = os.path.join(self.diffusers_dir , 'new_code.py' )
with open(A_ , 'w' , newline='\n' ) as f:
f.write(A_ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(A_ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=A_ )
with open(A_ , 'r' ) as f:
self.assertEqual(f.read() , A_ )
def a__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = check_copies.find_code_in_diffusers('schedulers.scheduling_ddpm.DDPMSchedulerOutput' )
self.assertEqual(A_ , A_ )
def a__ ( self : Any ) -> Dict:
"""simple docstring"""
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , REFERENCE_CODE + '\n' , )
# With no empty line at the end
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , A_ , )
# Copy consistency with rename
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , re.sub('DDPM' , 'Test' , A_ ) , )
# Copy consistency with a really long name
lowerCamelCase_ = 'TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'
self.check_copy_consistency(
f"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , f"""{long_class_name}SchedulerOutput""" , re.sub('Bert' , A_ , A_ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , A_ , overwrite_result=re.sub('DDPM' , 'Test' , A_ ) , )
from manim import *
class A( UpperCamelCase ):
'''simple docstring'''
def a__ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = Rectangle(height=0.5 , width=0.5 )
lowerCamelCase_ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
lowerCamelCase_ = Rectangle(height=0.25 , width=0.25 )
lowerCamelCase_ = [mem.copy() for i in range(6 )]
lowerCamelCase_ = [mem.copy() for i in range(6 )]
lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = VGroup(A_ , A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = Text('CPU' , font_size=24 )
lowerCamelCase_ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(A_ )
lowerCamelCase_ = [mem.copy() for i in range(4 )]
lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = Text('GPU' , font_size=24 )
lowerCamelCase_ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
gpu.move_to([-1, -1, 0] )
self.add(A_ )
lowerCamelCase_ = [mem.copy() for i in range(6 )]
lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = Text('Model' , font_size=24 )
lowerCamelCase_ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
model.move_to([3, -1.0, 0] )
self.add(A_ )
lowerCamelCase_ = []
lowerCamelCase_ = []
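# For each model block, build a colored fill target plus a matching copy parked over
# the CPU column, so the weights can later be animated moving between devices.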
for i, rect in enumerate(A_ ):
lowerCamelCase_ = fill.copy().set_fill(A_ , opacity=0.8 )
target.move_to(A_ )
model_arr.append(A_ )
lowerCamelCase_ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(A_ , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(A_ )
self.add(*A_ , *A_ )
lowerCamelCase_ = [meta_mem.copy() for i in range(6 )]
lowerCamelCase_ = [meta_mem.copy() for i in range(6 )]
lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = VGroup(A_ , A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = Text('Disk' , font_size=24 )
lowerCamelCase_ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
disk.move_to([-4, -1.25, 0] )
self.add(A_ , A_ )
lowerCamelCase_ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCamelCase_ = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(A_ , A_ )
lowerCamelCase_ = MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(A_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(A_ )
lowerCamelCase_ = MarkupText(
f"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(A_ ) )
lowerCamelCase_ = Square(0.3 )
input.set_fill(A_ , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , A_ , buff=0.5 )
self.play(Write(A_ ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=A_ , buff=0.02 )
self.play(MoveToTarget(A_ ) )
self.play(FadeOut(A_ ) )
lowerCamelCase_ = Arrow(start=A_ , end=A_ , color=A_ , buff=0.5 )
a.next_to(model_arr[0].get_left() , A_ , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
lowerCamelCase_ = MarkupText(
f"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(A_ , run_time=3 ) )
lowerCamelCase_ = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(A_ ) , Circumscribe(model_arr[0] , color=A_ , **A_ ) , Circumscribe(model_cpu_arr[0] , color=A_ , **A_ ) , Circumscribe(gpu_rect[0] , color=A_ , **A_ ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
lowerCamelCase_ = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , A_ , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
lowerCamelCase_ = AnimationGroup(
FadeOut(A_ , run_time=0.5 ) , MoveToTarget(A_ , run_time=0.5 ) , FadeIn(A_ , run_time=0.5 ) , lag_ratio=0.2 )
self.play(A_ )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
lowerCamelCase_ = 0.7
self.play(
Circumscribe(model_arr[i] , **A_ ) , Circumscribe(cpu_left_col_base[i] , **A_ ) , Circumscribe(cpu_left_col_base[i + 1] , color=A_ , **A_ ) , Circumscribe(gpu_rect[0] , color=A_ , **A_ ) , Circumscribe(model_arr[i + 1] , color=A_ , **A_ ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=A_ , **A_ ) , Circumscribe(cpu_left_col_base[-1] , color=A_ , **A_ ) , Circumscribe(gpu_rect[0] , color=A_ , **A_ ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
lowerCamelCase_ = a_c
lowerCamelCase_ = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(A_ ) , FadeOut(A_ , run_time=0.5 ) , )
lowerCamelCase_ = MarkupText(f"""Inference on a model too large for GPU memory\nis successfully completed.""" , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(A_ , run_time=3 ) , MoveToTarget(A_ ) )
self.wait()
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class A( unittest.TestCase ):
'''simple docstring'''
def a__ ( self : Optional[int] , A_ : Tuple , A_ : str , A_ : int ) -> Any:
"""simple docstring"""
self.assertEqual(len(A_ ) , len(A_ ) )
for a, b in zip(A_ , A_ ):
self.assertAlmostEqual(A_ , A_ , delta=A_ )
def a__ ( self : int ) -> str:
"""simple docstring"""
lowerCamelCase_ = GradientAccumulator()
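# Accumulate three gradient steps; the stored gradient should be their elementwise
# sum, [-2.0, 5.0], while a gradient list of mismatched length must raise.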
accumulator([tf.constant([1.0, 2.0] )] )
accumulator([tf.constant([-2.0, 1.0] )] )
accumulator([tf.constant([-1.0, 2.0] )] )
with self.assertRaises(A_ ):
accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
self.assertEqual(accumulator.step , 3 )
self.assertEqual(len(accumulator.gradients ) , 1 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1E-2 )
def a__ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = None
ops.enable_eager_execution_internal()
lowerCamelCase_ = tf.config.list_physical_devices('CPU' )
if len(A_ ) == 1:
tf.config.set_logical_device_configuration(
physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
lowerCamelCase_ = tf.config.list_logical_devices(device_type='CPU' )
lowerCamelCase_ = tf.distribute.MirroredStrategy(devices=devices[:2] )
with strategy.scope():
lowerCamelCase_ = GradientAccumulator()
lowerCamelCase_ = tf.Variable([4.0, 3.0] )
lowerCamelCase_ , lowerCamelCase_ = create_optimizer(5E-5 , 10 , 5 )
lowerCamelCase_ = tf.Variable([0.0, 0.0] , trainable=A_ )
def accumulate_on_replica(A_ : Any ):
accumulator([gradient] )
def apply_on_replica():
optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )
@tf.function
def accumulate(A_ : List[Any] , A_ : Tuple ):
with strategy.scope():
lowerCamelCase_ = strategy.experimental_local_results(A_ )
local_variables[0].assign(A_ )
local_variables[1].assign(A_ )
strategy.run(A_ , args=(gradient_placeholder,) )
@tf.function
def apply_grad():
with strategy.scope():
strategy.run(A_ )
def _check_local_values(A_ : List[Any] , A_ : str ):
lowerCamelCase_ = strategy.experimental_local_results(accumulator._gradients[0] )
self.assertListAlmostEqual(values[0].value() , A_ , tol=1E-2 )
self.assertListAlmostEqual(values[1].value() , A_ , tol=1E-2 )
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] )
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase : Optional[int] = logging.get_logger(__name__)
lowerCamelCase : Optional[int] = {
"vocab_file": "vocab.json",
"tokenizer_config_file": "tokenizer_config.json",
"merges_file": "merges.txt",
}
lowerCamelCase : str = {
"vocab_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"
),
},
"tokenizer_config_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"
),
},
"merges_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"
),
},
}
lowerCamelCase : List[Any] = "</w>"
lowerCamelCase : Union[str, Any] = "@@ "
def _SCREAMING_SNAKE_CASE ( lowercase : Union[str, Any] ):
'''simple docstring'''
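# Collect the set of adjacent symbol pairs (bigrams) occurring in the word.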
lowerCamelCase_ = set()
lowerCamelCase_ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowerCamelCase_ = char
return pairs
# Speech2Text2 has no max input length
lowerCamelCase : Tuple = {"facebook/s2t-wav2vec2-large-en-de": 1_024}
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self : List[Any] , A_ : Union[str, Any] , A_ : List[Any]="<s>" , A_ : Union[str, Any]="<pad>" , A_ : str="</s>" , A_ : Optional[int]="<unk>" , A_ : Optional[int]=False , A_ : Any=None , **A_ : Tuple , ) -> int:
"""simple docstring"""
super().__init__(
unk_token=A_ , bos_token=A_ , eos_token=A_ , pad_token=A_ , do_lower_case=A_ , **A_ , )
lowerCamelCase_ = do_lower_case
with open(A_ , encoding='utf-8' ) as vocab_handle:
lowerCamelCase_ = json.load(A_ )
lowerCamelCase_ = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(f"""No merges files provided. {self.__class__.__name__} can only be used for decoding.""" )
lowerCamelCase_ = None
lowerCamelCase_ = None
else:
with open(A_ , encoding='utf-8' ) as merges_handle:
lowerCamelCase_ = merges_handle.read().split('\n' )[:-1]
lowerCamelCase_ = [tuple(merge.split()[:2] ) for merge in merges]
lowerCamelCase_ = dict(zip(A_ , range(len(A_ ) ) ) )
lowerCamelCase_ = {}
@property
def a__ ( self : Dict ) -> int:
"""simple docstring"""
return len(self.decoder )
def a__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def a__ ( self : Union[str, Any] , A_ : List[Any] ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
lowerCamelCase_ = get_pairs(A_ )
if not pairs:
return token
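# Greedy BPE: repeatedly merge the lowest-ranked bigram until no learned merge
# applies or the word collapses to a single symbol.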
while True:
lowerCamelCase_ = min(A_ , key=lambda A_ : self.bpe_ranks.get(A_ , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
lowerCamelCase_ , lowerCamelCase_ = bigram
lowerCamelCase_ = []
lowerCamelCase_ = 0
while i < len(A_ ):
try:
lowerCamelCase_ = word.index(A_ , A_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCamelCase_ = j
if word[i] == first and i < len(A_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCamelCase_ = tuple(A_ )
lowerCamelCase_ = new_word
if len(A_ ) == 1:
break
else:
lowerCamelCase_ = get_pairs(A_ )
lowerCamelCase_ = ' '.join(A_ )
if word == "\n " + BPE_TOKEN_MERGES:
lowerCamelCase_ = '\n' + BPE_TOKEN_MERGES
if word.endswith(A_ ):
lowerCamelCase_ = word.replace(A_ , '' )
lowerCamelCase_ = word.replace(' ' , A_ )
lowerCamelCase_ = word
return word
def a__ ( self : Dict , A_ : Any ) -> Optional[int]:
"""simple docstring"""
if self.bpe_ranks is None:
raise ValueError(
'This tokenizer was instantiated without a `merges.txt` file, so'
' that it can only be used for decoding, not for encoding. '
'Make sure to provide `merges.txt` file at instantiation to enable '
'encoding.' )
if self.do_lower_case:
lowerCamelCase_ = text.lower()
lowerCamelCase_ = text.split()
lowerCamelCase_ = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(A_ ).split(' ' ) ) )
return split_tokens
def a__ ( self : List[Any] , A_ : str ) -> int:
"""simple docstring"""
return self.encoder.get(A_ , self.encoder.get(self.unk_token ) )
def a__ ( self : Tuple , A_ : int ) -> str:
"""simple docstring"""
lowerCamelCase_ = self.decoder.get(A_ , self.unk_token )
return result
def a__ ( self : Tuple , A_ : List[str] ) -> str:
"""simple docstring"""
lowerCamelCase_ = ' '.join(A_ )
# make sure @@ tokens are concatenated
lowerCamelCase_ = ''.join(string.split(A_ ) )
return string
def a__ ( self : List[Any] , A_ : str , A_ : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(A_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCamelCase_ = os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
lowerCamelCase_ = os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(A_ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=A_ , ensure_ascii=A_ ) + '\n' )
lowerCamelCase_ = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(A_ , 'w' , encoding='utf-8' ) as writer:
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."""
' Please check that the tokenizer is not corrupted!' )
lowerCamelCase_ = token_index
writer.write(' '.join(A_ ) + '\n' )
index += 1
return (vocab_file, merges_file)
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
lowerCamelCase : str = imread(r"digital_image_processing/image_data/lena_small.jpg")
lowerCamelCase : Optional[Any] = cvtColor(img, COLOR_BGR2GRAY)
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = cn.convert_to_negative(lowercase )
# assert negative_img array for at least one True
assert negative_img.any()
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
with Image.open('digital_image_processing/image_data/lena_small.jpg' ) as img:
# Work around assertion for response
assert str(cc.change_contrast(lowercase , 1_10 ) ).startswith(
'<PIL.Image.Image image mode=RGB size=100x100 at' )
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = imread('digital_image_processing/image_data/lena_small.jpg' , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
lowerCamelCase_ = canny.canny(lowercase )
# assert canny array for at least one True
assert canny_array.any()
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
assert gg.gaussian_filter(lowercase , 5 , sigma=0.9 ).all()
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
lowerCamelCase_ = conv.img_convolve(lowercase , lowercase ).astype(lowercase )
assert res.any()
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
assert med.median_filter(lowercase , 3 ).any()
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ , lowerCamelCase_ = sob.sobel_filter(lowercase )
assert grad.any() and theta.any()
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = sp.make_sepia(lowercase , 20 )
assert sepia.all()
def _SCREAMING_SNAKE_CASE ( lowercase : str = "digital_image_processing/image_data/lena_small.jpg" ):
'''simple docstring'''
lowerCamelCase_ = bs.Burkes(imread(lowercase , 1 ) , 1_20 )
burkes.process()
assert burkes.output_img.any()
def _SCREAMING_SNAKE_CASE ( lowercase : str = "digital_image_processing/image_data/lena_small.jpg" , ):
'''simple docstring'''
lowerCamelCase_ = rs.NearestNeighbour(imread(lowercase , 1 ) , 4_00 , 2_00 )
nn.process()
assert nn.output.any()
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = 'digital_image_processing/image_data/lena.jpg'
# Reading the image and converting it to grayscale.
lowerCamelCase_ = imread(lowercase , 0 )
# Test for get_neighbors_pixel function() return not None
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = image[x_coordinate][y_coordinate]
lowerCamelCase_ = lbp.get_neighbors_pixel(
lowercase , lowercase , lowercase , lowercase )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
lowerCamelCase_ = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
lowerCamelCase_ = lbp.local_binary_value(lowercase , lowercase , lowercase )
assert lbp_image.any()
| 651
| 1
|
def _SCREAMING_SNAKE_CASE ( lowercase : str ):
'''simple docstring'''
    lowerCamelCase_ = [int(i ) for i in ip_va_address.split('.' ) if i.isdigit()]
    # each octet of a valid IPv4 address lies in 0..255 (255 itself is valid)
    return len(octets ) == 4 and all(0 <= int(octet ) <= 2_55 for octet in octets )
if __name__ == "__main__":
lowerCamelCase : str = input().strip()
lowerCamelCase : List[str] = "valid" if is_ip_va_address_valid(ip) else "invalid"
print(F"""{ip} is a {valid_or_invalid} IP v4 address.""")
| 651
|
class A:
'''simple docstring'''
def __init__( self : Dict ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = {}
def a__ ( self : Union[str, Any] , A_ : List[Any] ) -> int:
"""simple docstring"""
if vertex not in self.adjacency:
lowerCamelCase_ = {}
self.num_vertices += 1
def a__ ( self : int , A_ : int , A_ : Optional[Any] , A_ : List[str] ) -> Tuple:
"""simple docstring"""
self.add_vertex(A_ )
self.add_vertex(A_ )
if head == tail:
return
lowerCamelCase_ = weight
lowerCamelCase_ = weight
def a__ ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = self.get_edges()
for edge in edges:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = edge
edges.remove((tail, head, weight) )
for i in range(len(A_ ) ):
lowerCamelCase_ = list(edges[i] )
        edges.sort(key=lambda e : e[2] )
for i in range(len(A_ ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
lowerCamelCase_ = edges[i][2] + 1
for edge in edges:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = edge
lowerCamelCase_ = weight
lowerCamelCase_ = weight
def __str__( self : str ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = ''
for tail in self.adjacency:
for head in self.adjacency[tail]:
lowerCamelCase_ = self.adjacency[head][tail]
string += f"""{head} -> {tail} == {weight}\n"""
return string.rstrip('\n' )
def a__ ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def a__ ( self : List[str] ) -> int:
"""simple docstring"""
return self.adjacency.keys()
@staticmethod
def a__ ( A_ : Optional[Any]=None , A_ : List[str]=None ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = Graph()
if vertices is None:
lowerCamelCase_ = []
if edges is None:
lowerCamelCase_ = []
for vertex in vertices:
g.add_vertex(A_ )
for edge in edges:
g.add_edge(*A_ )
return g
class A:
'''simple docstring'''
def __init__( self : Optional[int] ) -> int:
"""simple docstring"""
lowerCamelCase_ = {}
lowerCamelCase_ = {}
def __len__( self : Any ) -> List[str]:
"""simple docstring"""
return len(self.parent )
def a__ ( self : List[str] , A_ : Any ) -> Dict:
"""simple docstring"""
if item in self.parent:
return self.find(A_ )
lowerCamelCase_ = item
lowerCamelCase_ = 0
return item
def a__ ( self : List[str] , A_ : Tuple ) -> Optional[int]:
"""simple docstring"""
if item not in self.parent:
return self.make_set(A_ )
if item != self.parent[item]:
lowerCamelCase_ = self.find(self.parent[item] )
return self.parent[item]
def a__ ( self : Any , A_ : int , A_ : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = self.find(A_ )
lowerCamelCase_ = self.find(A_ )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
lowerCamelCase_ = roota
return roota
if self.rank[roota] < self.rank[roota]:
lowerCamelCase_ = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
lowerCamelCase_ = roota
return roota
return None
@staticmethod
def a__ ( A_ : int ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = graph.num_vertices
lowerCamelCase_ = Graph.UnionFind()
lowerCamelCase_ = []
while num_components > 1:
lowerCamelCase_ = {}
for vertex in graph.get_vertices():
lowerCamelCase_ = -1
lowerCamelCase_ = graph.get_edges()
for edge in edges:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = edge
edges.remove((tail, head, weight) )
for edge in edges:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = edge
lowerCamelCase_ = union_find.find(A_ )
lowerCamelCase_ = union_find.find(A_ )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
lowerCamelCase_ = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
lowerCamelCase_ = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = cheap_edge[vertex]
if union_find.find(A_ ) != union_find.find(A_ ):
union_find.union(A_ , A_ )
mst_edges.append(cheap_edge[vertex] )
lowerCamelCase_ = num_components - 1
lowerCamelCase_ = Graph.build(edges=A_ )
return mst
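# --- Editor's sketch (not part of the original file): the obfuscation above
# collapses root1/root2 into a single name, so this is a self-contained
# restatement of the intended union-find behaviour (find with path
# compression, union by rank) using invented helper names.
_parent, _rank = {}, {}

def _uf_find(x):
    _parent.setdefault(x, x)
    _rank.setdefault(x, 0)
    if _parent[x] != x:
        _parent[x] = _uf_find(_parent[x])  # path compression
    return _parent[x]

def _uf_union(a, b):
    roota, rootb = _uf_find(a), _uf_find(b)
    if roota == rootb:
        return roota
    if _rank[roota] < _rank[rootb]:
        roota, rootb = rootb, roota
    _parent[rootb] = roota  # attach the shallower tree under the deeper one
    if _rank[roota] == _rank[rootb]:
        _rank[roota] += 1
    return roota

_uf_union(1, 2)
_uf_union(3, 4)
_uf_union(2, 3)
assert _uf_find(1) == _uf_find(4)  # all four vertices share one component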
| 651
| 1
|
from ...utils import (
    OptionalDependencyNotAvailable,
    is_note_seq_available,
    is_torch_available,
    is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 651
|
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = 0
for i in range(1 , 10_01 ):
total += i**i
return str(lowercase )[-10:]
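# Editor's note: for the bounds used above (1**1 + 2**2 + ... + 1000**1000)
# the last ten digits are the published Project Euler 48 answer, 9110846700;
# stated here only as an external sanity check.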
if __name__ == "__main__":
print(solution())
| 651
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase : Dict = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Optional[int] = ["ViTFeatureExtractor"]
lowerCamelCase : Dict = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : int = [
"VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTForImageClassification",
"ViTForMaskedImageModeling",
"ViTModel",
"ViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Tuple = [
"TFViTForImageClassification",
"TFViTModel",
"TFViTPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Dict = [
"FlaxViTForImageClassification",
"FlaxViTModel",
"FlaxViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
lowerCamelCase : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 651
| 1
|
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class A:
'''simple docstring'''
UpperCamelCase = 42
UpperCamelCase = None
UpperCamelCase = None
lowerCamelCase : str = namedtuple("CoinsDistribResult", "moves excess")
def _SCREAMING_SNAKE_CASE ( lowercase : TreeNode | None ):
'''simple docstring'''
if root is None:
return 0
# Validation
def count_nodes(lowercase : TreeNode | None ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(lowercase : TreeNode | None ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(lowercase ) != count_coins(lowercase ):
raise ValueError('The nodes number should be same as the number of coins' )
# Main calculation
def get_distrib(lowercase : TreeNode | None ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
lowerCamelCase_ , lowerCamelCase_ = get_distrib(node.left )
lowerCamelCase_ , lowerCamelCase_ = get_distrib(node.right )
lowerCamelCase_ = 1 - left_distrib_excess
lowerCamelCase_ = 1 - right_distrib_excess
lowerCamelCase_ = (
left_distrib_moves
+ right_distrib_moves
+ abs(lowercase )
+ abs(lowercase )
)
lowerCamelCase_ = node.data - coins_to_left - coins_to_right
return CoinsDistribResult(lowercase , lowercase )
return get_distrib(lowercase )[0]
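# Editor's worked example (TreeNode fields assumed to be data/left/right):
# a root holding 3 coins with two coinless leaf children returns 2, since one
# coin is pushed down each edge and one coin stays at the root.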
if __name__ == "__main__":
import doctest
doctest.testmod()
| 651
|
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
lowerCamelCase : int = datasets.logging.get_logger(__name__)
lowerCamelCase : Optional[Any] = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n"
lowerCamelCase : Tuple = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n"
lowerCamelCase : Optional[Any] = "\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting 'keep_singletons=False', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n 'mentions': mentions\n 'muc': MUC metric [Vilain et al, 1995]\n 'bcub': B-cubed [Bagga and Baldwin, 1998]\n 'ceafe': CEAFe [Luo et al., 2005]\n 'lea': LEA [Moosavi and Strube, 2016]\n 'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric('coval')\n >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',\n ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',\n ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',\n ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',\n ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',\n ... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n"
def _SCREAMING_SNAKE_CASE ( lowercase : str , lowercase : Optional[int] , lowercase : Any=False , lowercase : Any=False , lowercase : Dict=True , lowercase : List[str]=False , lowercase : int="dummy_doc" ):
'''simple docstring'''
lowerCamelCase_ = {doc: key_lines}
lowerCamelCase_ = {doc: sys_lines}
lowerCamelCase_ = {}
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ , lowerCamelCase_ = reader.get_doc_mentions(lowercase , key_doc_lines[doc] , lowercase )
key_singletons_num += singletons_num
if NP_only or min_span:
lowerCamelCase_ = reader.set_annotated_parse_trees(lowercase , key_doc_lines[doc] , lowercase , lowercase )
lowerCamelCase_ , lowerCamelCase_ = reader.get_doc_mentions(lowercase , sys_doc_lines[doc] , lowercase )
sys_singletons_num += singletons_num
if NP_only or min_span:
lowerCamelCase_ = reader.set_annotated_parse_trees(lowercase , key_doc_lines[doc] , lowercase , lowercase )
if remove_nested:
lowerCamelCase_ , lowerCamelCase_ = reader.remove_nested_coref_mentions(lowercase , lowercase )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
lowerCamelCase_ , lowerCamelCase_ = reader.remove_nested_coref_mentions(lowercase , lowercase )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
lowerCamelCase_ = reader.get_mention_assignments(lowercase , lowercase )
lowerCamelCase_ = reader.get_mention_assignments(lowercase , lowercase )
lowerCamelCase_ = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
'Number of removed nested coreferring mentions in the key '
f"""annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}""" )
logger.info(
'Number of resulting singleton clusters in the key '
f"""annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}""" )
if not keep_singletons:
logger.info(
f"""{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system """
'files, respectively' )
return doc_coref_infos
def _SCREAMING_SNAKE_CASE ( lowercase : Optional[Any] , lowercase : Tuple , lowercase : List[str] , lowercase : List[Any] , lowercase : List[Any] , lowercase : Tuple , lowercase : str ):
'''simple docstring'''
lowerCamelCase_ = get_coref_infos(lowercase , lowercase , lowercase , lowercase , lowercase , lowercase )
lowerCamelCase_ = {}
lowerCamelCase_ = 0
lowerCamelCase_ = 0
for name, metric in metrics:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = evaluator.evaluate_documents(lowercase , lowercase , beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({f"""{name}/recall""": recall, f"""{name}/precision""": precision, f"""{name}/f1""": fa} )
logger.info(
name.ljust(10 ) , f"""Recall: {recall * 1_00:.2f}""" , f""" Precision: {precision * 1_00:.2f}""" , f""" F1: {fa * 1_00:.2f}""" , )
if conll_subparts_num == 3:
lowerCamelCase_ = (conll / 3) * 1_00
logger.info(f"""CoNLL score: {conll:.2f}""" )
output_scores.update({'conll_score': conll} )
return output_scores
def _SCREAMING_SNAKE_CASE ( lowercase : Union[str, Any] ):
'''simple docstring'''
lowerCamelCase_ = False
for line in key_lines:
if not line.startswith('#' ):
if len(line.split() ) > 6:
lowerCamelCase_ = line.split()[5]
if not parse_col == "-":
lowerCamelCase_ = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A( datasets.Metric ):
'''simple docstring'''
def a__ ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' ) ),
'references': datasets.Sequence(datasets.Value('string' ) ),
} ) , codebase_urls=['https://github.com/ns-moosavi/coval'] , reference_urls=[
'https://github.com/ns-moosavi/coval',
'https://www.aclweb.org/anthology/P16-1060',
'http://www.conll.cemantix.org/2012/data.html',
] , )
def a__ ( self : List[str] , A_ : Optional[Any] , A_ : Optional[int] , A_ : int=True , A_ : str=False , A_ : int=False , A_ : Union[str, Any]=False ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = [
('mentions', evaluator.mentions),
('muc', evaluator.muc),
('bcub', evaluator.b_cubed),
('ceafe', evaluator.ceafe),
('lea', evaluator.lea),
]
if min_span:
lowerCamelCase_ = util.check_gold_parse_annotation(A_ )
if not has_gold_parse:
raise NotImplementedError('References should have gold parse annotation to use \'min_span\'.' )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
lowerCamelCase_ = evaluate(
key_lines=A_ , sys_lines=A_ , metrics=A_ , NP_only=A_ , remove_nested=A_ , keep_singletons=A_ , min_span=A_ , )
return score
| 651
| 1
|
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = 42
class A( UpperCamelCase , UpperCamelCase ):
'''simple docstring'''
@register_to_config
def __init__( self : List[str] , A_ : int = 16 , A_ : int = 88 , A_ : Optional[int] = None , A_ : Optional[int] = None , A_ : int = 1 , A_ : float = 0.0 , A_ : int = 32 , A_ : Optional[int] = None , A_ : bool = False , A_ : Optional[int] = None , A_ : str = "geglu" , A_ : bool = True , A_ : bool = True , ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = attention_head_dim
lowerCamelCase_ = num_attention_heads * attention_head_dim
lowerCamelCase_ = in_channels
lowerCamelCase_ = torch.nn.GroupNorm(num_groups=A_ , num_channels=A_ , eps=1E-6 , affine=A_ )
lowerCamelCase_ = nn.Linear(A_ , A_ )
# 3. Define transformers blocks
lowerCamelCase_ = nn.ModuleList(
[
BasicTransformerBlock(
A_ , A_ , A_ , dropout=A_ , cross_attention_dim=A_ , activation_fn=A_ , attention_bias=A_ , double_self_attention=A_ , norm_elementwise_affine=A_ , )
for d in range(A_ )
] )
lowerCamelCase_ = nn.Linear(A_ , A_ )
def a__ ( self : Union[str, Any] , A_ : int , A_ : Union[str, Any]=None , A_ : str=None , A_ : Optional[Any]=None , A_ : str=1 , A_ : List[str]=None , A_ : bool = True , ) -> str:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = hidden_states.shape
lowerCamelCase_ = batch_frames // num_frames
lowerCamelCase_ = hidden_states
lowerCamelCase_ = hidden_states[None, :].reshape(A_ , A_ , A_ , A_ , A_ )
lowerCamelCase_ = hidden_states.permute(0 , 2 , 1 , 3 , 4 )
lowerCamelCase_ = self.norm(A_ )
lowerCamelCase_ = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , A_ , A_ )
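        # shape walk-through of the reshapes above (dims inferred from the
        # code): (batch*frames, C, H, W) -> (batch, frames, C, H, W)
        # -> (batch, C, frames, H, W) for GroupNorm, then
        # -> (batch*H*W, frames, C) so each spatial location attends over
        # the frame axis in the transformer blocks below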
lowerCamelCase_ = self.proj_in(A_ )
# 2. Blocks
for block in self.transformer_blocks:
lowerCamelCase_ = block(
A_ , encoder_hidden_states=A_ , timestep=A_ , cross_attention_kwargs=A_ , class_labels=A_ , )
# 3. Output
lowerCamelCase_ = self.proj_out(A_ )
lowerCamelCase_ = (
hidden_states[None, None, :]
.reshape(A_ , A_ , A_ , A_ , A_ )
.permute(0 , 3 , 4 , 1 , 2 )
.contiguous()
)
lowerCamelCase_ = hidden_states.reshape(A_ , A_ , A_ , A_ )
lowerCamelCase_ = hidden_states + residual
if not return_dict:
return (output,)
return TransformerTemporalModelOutput(sample=A_ )
| 651
|
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=UpperCamelCase )
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = field(default='''language-modeling''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
UpperCamelCase = Features({'''text''': Value('''string''' )} )
UpperCamelCase = Features({} )
UpperCamelCase = "text"
@property
def a__ ( self : List[Any] ) -> Dict[str, str]:
"""simple docstring"""
return {self.text_column: "text"}
| 651
| 1
|
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("0.12.2"):
raise Exception("requires fairseq >= 0.12.2")
if version.parse(fairseq.__version__) > version.parse("2"):
raise Exception("requires fairseq < v2")
logging.set_verbosity_info()
lowerCamelCase : Any = logging.get_logger(__name__)
lowerCamelCase : Tuple = "Hello, World!"
lowerCamelCase : Union[str, Any] = "en_XX"
def _SCREAMING_SNAKE_CASE ( lowercase : str , lowercase : str , lowercase : bool ):
'''simple docstring'''
lowerCamelCase_ = Path('data_bin' )
lowerCamelCase_ = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(lowercase ).parent ) , checkpoint_file=Path(lowercase ).name , _name='xmod_base' , arch='xmod_base' , task='multilingual_masked_lm' , data_name_or_path=str(lowercase ) , bpe='sentencepiece' , sentencepiece_model=str(Path(lowercase ).parent / 'sentencepiece.bpe.model' ) , src_dict=str(data_dir / 'dict.txt' ) , )
xmod.eval() # disable dropout
print(lowercase )
lowerCamelCase_ = xmod.model.encoder.sentence_encoder
lowerCamelCase_ = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_14 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , 'bottleneck' , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
lowerCamelCase_ = xmod.model.classification_heads['mnli'].out_proj.weight.shape[0]
print('Our X-MOD config:' , lowercase )
lowerCamelCase_ = XmodForSequenceClassification(lowercase ) if classification_head else XmodForMaskedLM(lowercase )
model.eval()
# Now let's copy all the weights.
# Embeddings
lowerCamelCase_ = xmod_sent_encoder.embed_tokens.weight
lowerCamelCase_ = xmod_sent_encoder.embed_positions.weight
lowerCamelCase_ = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
lowerCamelCase_ = xmod_sent_encoder.layernorm_embedding.weight
lowerCamelCase_ = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
lowerCamelCase_ = model.roberta.encoder.layer[i]
lowerCamelCase_ = xmod_sent_encoder.layers[i]
# self attention
lowerCamelCase_ = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError('Dimensions of self-attention weights do not match.' )
lowerCamelCase_ = xmod_layer.self_attn.q_proj.weight
lowerCamelCase_ = xmod_layer.self_attn.q_proj.bias
lowerCamelCase_ = xmod_layer.self_attn.k_proj.weight
lowerCamelCase_ = xmod_layer.self_attn.k_proj.bias
lowerCamelCase_ = xmod_layer.self_attn.v_proj.weight
lowerCamelCase_ = xmod_layer.self_attn.v_proj.bias
# self-attention output
lowerCamelCase_ = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError('Dimensions of self-attention output weights do not match.' )
lowerCamelCase_ = xmod_layer.self_attn.out_proj.weight
lowerCamelCase_ = xmod_layer.self_attn.out_proj.bias
lowerCamelCase_ = xmod_layer.self_attn_layer_norm.weight
lowerCamelCase_ = xmod_layer.self_attn_layer_norm.bias
# intermediate
lowerCamelCase_ = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('Dimensions of intermediate weights do not match.' )
lowerCamelCase_ = xmod_layer.fca.weight
lowerCamelCase_ = xmod_layer.fca.bias
# output
lowerCamelCase_ = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('Dimensions of feed-forward weights do not match.' )
lowerCamelCase_ = xmod_layer.fca.weight
lowerCamelCase_ = xmod_layer.fca.bias
lowerCamelCase_ = xmod_layer.final_layer_norm.weight
lowerCamelCase_ = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
lowerCamelCase_ = xmod_layer.adapter_layer_norm.weight
lowerCamelCase_ = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError('Lists of language adapters do not match.' )
for lang_code, adapter in xmod_layer.adapter_modules.items():
lowerCamelCase_ = bert_output.adapter_modules[lang_code]
lowerCamelCase_ = xmod_layer.adapter_modules[lang_code]
lowerCamelCase_ = from_adapter.fca.weight
lowerCamelCase_ = from_adapter.fca.bias
lowerCamelCase_ = from_adapter.fca.weight
lowerCamelCase_ = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
lowerCamelCase_ = xmod_sent_encoder.layer_norm.weight
lowerCamelCase_ = xmod_sent_encoder.layer_norm.bias
if classification_head:
lowerCamelCase_ = xmod.model.classification_heads['mnli'].dense.weight
lowerCamelCase_ = xmod.model.classification_heads['mnli'].dense.bias
lowerCamelCase_ = xmod.model.classification_heads['mnli'].out_proj.weight
lowerCamelCase_ = xmod.model.classification_heads['mnli'].out_proj.bias
else:
# LM Head
lowerCamelCase_ = xmod.model.encoder.lm_head.dense.weight
lowerCamelCase_ = xmod.model.encoder.lm_head.dense.bias
lowerCamelCase_ = xmod.model.encoder.lm_head.layer_norm.weight
lowerCamelCase_ = xmod.model.encoder.lm_head.layer_norm.bias
lowerCamelCase_ = xmod.model.encoder.lm_head.weight
lowerCamelCase_ = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
lowerCamelCase_ = xmod.encode(lowercase ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(lowercase )
lowerCamelCase_ = model(lowercase )[0]
if classification_head:
lowerCamelCase_ = xmod.model.classification_heads['mnli'](xmod.extract_features(lowercase ) )
else:
lowerCamelCase_ = xmod.model(lowercase , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
lowerCamelCase_ = torch.max(torch.abs(our_output - their_output ) ).item()
print(f"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
lowerCamelCase_ = torch.allclose(lowercase , lowercase , atol=1e-3 )
print('Do both models output the same tensors?' , '🔥' if success else '💩' )
if not success:
raise Exception('Something went wRoNg' )
Path(lowercase ).mkdir(parents=lowercase , exist_ok=lowercase )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowercase )
if __name__ == "__main__":
lowerCamelCase : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--xmod_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--classification_head", action="store_true", help="Whether to convert a final classification head."
)
lowerCamelCase : List[Any] = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 651
|
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = '''new-model'''
if is_tf_available():
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = NewModelConfig
@require_tf
class A( unittest.TestCase ):
'''simple docstring'''
@slow
def a__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = 'bert-base-cased'
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = 'bert-base-cased'
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForPreTraining.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForCausalLM.from_pretrained(A_ )
lowerCamelCase_ , lowerCamelCase_ = TFAutoModelForCausalLM.from_pretrained(A_ , output_loading_info=A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelWithLMHead.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : int ) -> str:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForMaskedLM.from_pretrained(A_ )
lowerCamelCase_ , lowerCamelCase_ = TFAutoModelForMaskedLM.from_pretrained(A_ , output_loading_info=A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : Any ) -> List[Any]:
"""simple docstring"""
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained(A_ )
lowerCamelCase_ , lowerCamelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained(A_ , output_loading_info=A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForSequenceClassification.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : List[Any] ) -> Any:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForQuestionAnswering.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
@require_tensorflow_probability
def a__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForTableQuestionAnswering.from_pretrained(A_ )
lowerCamelCase_ , lowerCamelCase_ = TFAutoModelForTableQuestionAnswering.from_pretrained(
A_ , output_loading_info=A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
def a__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = TFAutoModelWithLMHead.from_pretrained(A_ )
self.assertIsInstance(A_ , A_ )
self.assertEqual(model.num_parameters() , 14410 )
self.assertEqual(model.num_parameters(only_trainable=A_ ) , 14410 )
def a__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = TFAutoModelWithLMHead.from_pretrained(A_ )
self.assertIsInstance(A_ , A_ )
self.assertEqual(model.num_parameters() , 14410 )
self.assertEqual(model.num_parameters(only_trainable=A_ ) , 14410 )
def a__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ = TFAutoModel.from_pretrained('sgugger/funnel-random-tiny' )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = copy.deepcopy(model.config )
lowerCamelCase_ = ['FunnelBaseModel']
lowerCamelCase_ = TFAutoModel.from_config(A_ )
self.assertIsInstance(A_ , A_ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(A_ )
lowerCamelCase_ = TFAutoModel.from_pretrained(A_ )
self.assertIsInstance(A_ , A_ )
def a__ ( self : Any ) -> Tuple:
"""simple docstring"""
try:
AutoConfig.register('new-model' , A_ )
lowerCamelCase_ = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(A_ ):
auto_class.register(A_ , A_ )
auto_class.register(A_ , A_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(A_ ):
auto_class.register(A_ , A_ )
# Now that the config is registered, it can be used as any other config with the auto-API
lowerCamelCase_ = BertModelTester(self ).get_config()
lowerCamelCase_ = NewModelConfig(**tiny_config.to_dict() )
lowerCamelCase_ = auto_class.from_config(A_ )
self.assertIsInstance(A_ , A_ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(A_ )
lowerCamelCase_ = auto_class.from_pretrained(A_ )
self.assertIsInstance(A_ , A_ )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def a__ ( self : int ) -> int:
"""simple docstring"""
with self.assertRaisesRegex(
A_ , 'bert-base is not a local folder and is not a valid model identifier' ):
lowerCamelCase_ = TFAutoModel.from_pretrained('bert-base' )
def a__ ( self : Any ) -> Dict:
"""simple docstring"""
with self.assertRaisesRegex(
A_ , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
lowerCamelCase_ = TFAutoModel.from_pretrained(A_ , revision='aaaaaa' )
def a__ ( self : str ) -> int:
"""simple docstring"""
with self.assertRaisesRegex(
A_ , 'hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin' , ):
lowerCamelCase_ = TFAutoModel.from_pretrained('hf-internal-testing/config-no-model' )
def a__ ( self : Any ) -> List[Any]:
"""simple docstring"""
with self.assertRaisesRegex(A_ , 'Use `from_pt=True` to load this model' ):
lowerCamelCase_ = TFAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only' )
def a__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' )
with RequestCounter() as counter:
lowerCamelCase_ = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
lowerCamelCase_ = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' )
with RequestCounter() as counter:
lowerCamelCase_ = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| 651
| 1
|
from functools import reduce
lowerCamelCase : List[str] = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def _SCREAMING_SNAKE_CASE ( lowercase : str = N ):
'''simple docstring'''
return max(
# mypy cannot properly interpret reduce
        int(reduce(lambda x , y : str(int(x ) * int(y ) ) , n[i : i + 13] ) )
for i in range(len(lowercase ) - 12 ) )
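# Editor's note: for the 1000-digit constant above and a window of 13 digits,
# the maximum product is the published Project Euler 8 answer, 23514624000;
# included only as an external sanity check.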
if __name__ == "__main__":
print(F"""{solution() = }""")
| 651
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : str = logging.get_logger(__name__)
lowerCamelCase : List[str] = {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = '''gpt_neox_japanese'''
def __init__( self : int , A_ : Dict=32000 , A_ : List[Any]=2560 , A_ : Dict=32 , A_ : Union[str, Any]=32 , A_ : List[Any]=4 , A_ : List[str]="gelu" , A_ : Dict=1.00 , A_ : int=10000 , A_ : Dict=2048 , A_ : Dict=0.02 , A_ : Any=1E-5 , A_ : Union[str, Any]=True , A_ : int=31996 , A_ : List[str]=31999 , A_ : List[Any]=0.1 , A_ : List[Any]=0.0 , **A_ : Tuple , ) -> Dict:
"""simple docstring"""
super().__init__(bos_token_id=A_ , eos_token_id=A_ , **A_ )
lowerCamelCase_ = vocab_size
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_multiple_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = rotary_pct
lowerCamelCase_ = rotary_emb_base
lowerCamelCase_ = initializer_range
lowerCamelCase_ = layer_norm_eps
lowerCamelCase_ = use_cache
lowerCamelCase_ = attention_dropout
lowerCamelCase_ = hidden_dropout
| 651
| 1
|
from typing import Union
import fire
import torch
from tqdm import tqdm
def _SCREAMING_SNAKE_CASE ( lowercase : str , lowercase : str = "cpu" , lowercase : Union[str, None] = None ):
'''simple docstring'''
lowerCamelCase_ = torch.load(lowercase , map_location=lowercase )
for k, v in tqdm(state_dict.items() ):
if not isinstance(lowercase , torch.Tensor ):
raise TypeError('FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin' )
lowerCamelCase_ = v.half()
if save_path is None: # overwrite src_path
lowerCamelCase_ = src_path
torch.save(lowercase , lowercase )
if __name__ == "__main__":
fire.Fire(convert)
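# Editor's sketch of a hypothetical invocation (python-fire maps the
# function's parameters to CLI arguments; the script filename is assumed):
#   python convert_to_fp16.py pytorch_model.bin --save_path model_fp16.bin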
| 651
|
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
lowerCamelCase : List[Any] = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
"text-classification",
"language-modeling",
"summarization",
"token-classification",
"question-answering",
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
lowerCamelCase : Tuple = logging.getLogger()
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = argparse.ArgumentParser()
parser.add_argument('-f' )
lowerCamelCase_ = parser.parse_args()
return args.f
def _SCREAMING_SNAKE_CASE ( lowercase : Optional[Any] , lowercase : Dict="eval" ):
'''simple docstring'''
lowerCamelCase_ = os.path.join(lowercase , f"""{split}_results.json""" )
if os.path.exists(lowercase ):
with open(lowercase , 'r' ) as f:
return json.load(lowercase )
raise ValueError(f"""can't find {path}""" )
lowerCamelCase : str = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class A( UpperCamelCase ):
'''simple docstring'''
def a__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = self.get_auto_remove_tmp_dir()
lowerCamelCase_ = f"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
with patch.object(A_ , 'argv' , A_ ):
run_flax_glue.main()
lowerCamelCase_ = get_results(A_ )
self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )
@slow
def a__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = self.get_auto_remove_tmp_dir()
lowerCamelCase_ = f"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(A_ , 'argv' , A_ ):
run_clm_flax.main()
lowerCamelCase_ = get_results(A_ )
self.assertLess(result['eval_perplexity'] , 100 )
@slow
def a__ ( self : str ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = self.get_auto_remove_tmp_dir()
lowerCamelCase_ = f"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
        with patch.object(sys, "argv", testargs):
            run_summarization_flax.main()

        result = get_results(tmp_dir, split="test")
        self.assertGreaterEqual(result["test_rouge1"], 10)
        self.assertGreaterEqual(result["test_rouge2"], 2)
        self.assertGreaterEqual(result["test_rougeL"], 7)
        self.assertGreaterEqual(result["test_rougeLsum"], 7)
@slow
    def test_run_mlm(self):
        """Slow check: masked LM fine-tuning with distilroberta-base keeps perplexity under 42."""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
        with patch.object(sys, "argv", testargs):
            run_mlm_flax.main()

        result = get_results(tmp_dir)
        self.assertLess(result["eval_perplexity"], 42)
@slow
    def test_run_t5_mlm(self):
        """Slow check: T5 span-corruption pretraining reaches 42% eval accuracy."""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
        with patch.object(sys, "argv", testargs):
            run_t5_mlm_flax.main()

        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.42)
@slow
    def test_run_ner(self):
        """Slow check: token classification on a CoNLL fixture; trains longer on multi-GPU."""
        # With data sharded across GPUs there are fewer steps per epoch, so train longer.
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
        with patch.object(sys, "argv", testargs):
            run_flax_ner.main()

        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertGreaterEqual(result["eval_f1"], 0.3)
@slow
    def test_run_qa(self):
        """Slow check: SQuAD-style QA with negatives clears F1 and exact-match floors."""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
        with patch.object(sys, "argv", testargs):
            run_qa.main()

        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_f1"], 30)
        self.assertGreaterEqual(result["eval_exact"], 30)
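# Running these tests — a sketch; the file name below is an assumption:
#   python -m pytest test_flax_examples.py -k test_run_glue     # fast test only
#   RUN_SLOW=1 python -m pytest test_flax_examples.py           # include @slow tests
# The @slow decorator from transformers.testing_utils skips a test unless the
# RUN_SLOW environment variable is set to a truthy value.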