| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (87 to 55.2k chars) | int64 (0 to 349) | string (135 to 49.1k chars) | int64 (0 to 349) | int64 (0 to 1) |
"""simple docstring"""
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    """Tracks the peak CPU RSS of the current process on a background thread."""

    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak


cpu_peak_tracker = PeakCPUMemory()


def start_measure():
    # Time
    measures = {"time": time.time()}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()

    return measures


def end_measure(start_measures):
    # Time
    measures = {"time": time.time() - start_measures["time"]}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem (deltas in MiB)
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20

    # GPU mem (deltas in MiB)
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20

    return measures


def log_measures(measures, description):
    print(f"{description}:")
    print(f"- Time: {measures['time']:.2f}s")
    for i in range(torch.cuda.device_count()):
        print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB")
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
| 301
|
"""simple docstring"""
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def prepare_metadata(class_info_file, repo_path="shi-labs/oneformer_demo"):
    with open(hf_hub_download(repo_path, class_info_file, repo_type="dataset"), "r") as f:
        class_info = json.load(f)
    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        metadata[key] = info["name"]
        class_names.append(info["name"])
        if info["isthing"]:
            thing_ids.append(int(key))
    metadata["thing_ids"] = thing_ids
    metadata["class_names"] = class_names
    return metadata
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_=7 , snake_case_=3 , snake_case_=30 , snake_case_=400 , snake_case_=None , snake_case_=True , snake_case_=True , snake_case_=[0.5, 0.5, 0.5] , snake_case_=[0.5, 0.5, 0.5] , snake_case_=10 , snake_case_=False , snake_case_=255 , snake_case_="shi-labs/oneformer_demo" , snake_case_="ade20k_panoptic.json" , snake_case_=10 , ) -> Union[str, Any]:
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = min_resolution
__lowerCAmelCase = max_resolution
__lowerCAmelCase = do_resize
__lowerCAmelCase = {"""shortest_edge""": 32, """longest_edge""": 1_333} if size is None else size
__lowerCAmelCase = do_normalize
__lowerCAmelCase = image_mean
__lowerCAmelCase = image_std
__lowerCAmelCase = class_info_file
__lowerCAmelCase = prepare_metadata(snake_case_ , snake_case_ )
__lowerCAmelCase = num_text
__lowerCAmelCase = repo_path
# for the post_process_functions
__lowerCAmelCase = 2
__lowerCAmelCase = 10
__lowerCAmelCase = 10
__lowerCAmelCase = 3
__lowerCAmelCase = 4
__lowerCAmelCase = num_labels
__lowerCAmelCase = do_reduce_labels
__lowerCAmelCase = ignore_index
def A__ ( self ) -> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def A__ ( self , snake_case_ , snake_case_=False ) -> Dict:
if not batched:
__lowerCAmelCase = image_inputs[0]
if isinstance(snake_case_ , Image.Image ):
__lowerCAmelCase , __lowerCAmelCase = image.size
else:
__lowerCAmelCase , __lowerCAmelCase = image.shape[1], image.shape[2]
if w < h:
__lowerCAmelCase = int(self.size["""shortest_edge"""] * h / w )
__lowerCAmelCase = self.size["""shortest_edge"""]
elif w > h:
__lowerCAmelCase = self.size["""shortest_edge"""]
__lowerCAmelCase = int(self.size["""shortest_edge"""] * w / h )
else:
__lowerCAmelCase = self.size["""shortest_edge"""]
__lowerCAmelCase = self.size["""shortest_edge"""]
else:
__lowerCAmelCase = []
for image in image_inputs:
__lowerCAmelCase , __lowerCAmelCase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__lowerCAmelCase = max(snake_case_ , key=lambda snake_case_ : item[0] )[0]
__lowerCAmelCase = max(snake_case_ , key=lambda snake_case_ : item[1] )[1]
return expected_height, expected_width
def A__ ( self ) -> Tuple:
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class lowerCAmelCase_ ( A__ , unittest.TestCase ):
'''simple docstring'''
_snake_case = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
_snake_case = image_processing_class
def A__ ( self ) -> str:
__lowerCAmelCase = OneFormerImageProcessorTester(self )
@property
def A__ ( self ) -> Dict:
return self.image_processing_tester.prepare_image_processor_dict()
def A__ ( self ) -> Union[str, Any]:
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case_ , """image_mean""" ) )
self.assertTrue(hasattr(snake_case_ , """image_std""" ) )
self.assertTrue(hasattr(snake_case_ , """do_normalize""" ) )
self.assertTrue(hasattr(snake_case_ , """do_resize""" ) )
self.assertTrue(hasattr(snake_case_ , """size""" ) )
self.assertTrue(hasattr(snake_case_ , """ignore_index""" ) )
self.assertTrue(hasattr(snake_case_ , """class_info_file""" ) )
self.assertTrue(hasattr(snake_case_ , """num_text""" ) )
self.assertTrue(hasattr(snake_case_ , """repo_path""" ) )
self.assertTrue(hasattr(snake_case_ , """metadata""" ) )
self.assertTrue(hasattr(snake_case_ , """do_reduce_labels""" ) )
def A__ ( self ) -> List[str]:
pass
def A__ ( self ) -> Union[str, Any]:
# Initialize image_processor
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCAmelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , Image.Image )
# Test not batched input
__lowerCAmelCase = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
__lowerCAmelCase , __lowerCAmelCase = self.image_processing_tester.get_expected_values(snake_case_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCAmelCase , __lowerCAmelCase = self.image_processing_tester.get_expected_values(snake_case_ , batched=snake_case_ )
__lowerCAmelCase = image_processor(
snake_case_ , ["""semantic"""] * len(snake_case_ ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A__ ( self ) -> List[str]:
# Initialize image_processor
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowerCAmelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case_ , numpify=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , np.ndarray )
# Test not batched input
__lowerCAmelCase = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
__lowerCAmelCase , __lowerCAmelCase = self.image_processing_tester.get_expected_values(snake_case_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCAmelCase , __lowerCAmelCase = self.image_processing_tester.get_expected_values(snake_case_ , batched=snake_case_ )
__lowerCAmelCase = image_processor(
snake_case_ , ["""semantic"""] * len(snake_case_ ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A__ ( self ) -> Tuple:
# Initialize image_processor
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowerCAmelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case_ , torchify=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , torch.Tensor )
# Test not batched input
__lowerCAmelCase = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
__lowerCAmelCase , __lowerCAmelCase = self.image_processing_tester.get_expected_values(snake_case_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCAmelCase , __lowerCAmelCase = self.image_processing_tester.get_expected_values(snake_case_ , batched=snake_case_ )
__lowerCAmelCase = image_processor(
snake_case_ , ["""semantic"""] * len(snake_case_ ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A__ ( self , snake_case_=False , snake_case_=False , snake_case_="np" ) -> Optional[Any]:
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
__lowerCAmelCase = self.image_processing_tester.num_labels
__lowerCAmelCase = None
__lowerCAmelCase = None
__lowerCAmelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case_ )
if with_segmentation_maps:
__lowerCAmelCase = num_labels
if is_instance_map:
__lowerCAmelCase = list(range(snake_case_ ) ) * 2
__lowerCAmelCase = dict(enumerate(snake_case_ ) )
__lowerCAmelCase = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uint8 ) for img in image_inputs
]
if segmentation_type == "pil":
__lowerCAmelCase = [Image.fromarray(snake_case_ ) for annotation in annotations]
__lowerCAmelCase = image_processor(
snake_case_ , ["""semantic"""] * len(snake_case_ ) , snake_case_ , return_tensors="""pt""" , instance_id_to_semantic_id=snake_case_ , pad_and_return_pixel_mask=snake_case_ , )
return inputs
def A__ ( self ) -> List[str]:
pass
def A__ ( self ) -> Optional[Any]:
def common(snake_case_=False , snake_case_=None ):
__lowerCAmelCase = self.comm_get_image_processor_inputs(
with_segmentation_maps=snake_case_ , is_instance_map=snake_case_ , segmentation_type=snake_case_ )
__lowerCAmelCase = inputs["""mask_labels"""]
__lowerCAmelCase = inputs["""class_labels"""]
__lowerCAmelCase = inputs["""pixel_values"""]
__lowerCAmelCase = inputs["""text_inputs"""]
# check the batch_size
for mask_label, class_label, text_input in zip(snake_case_ , snake_case_ , snake_case_ ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(snake_case_ ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=snake_case_ )
common(is_instance_map=snake_case_ , segmentation_type="""pil""" )
common(is_instance_map=snake_case_ , segmentation_type="""pil""" )
def A__ ( self ) -> Optional[int]:
__lowerCAmelCase = np.zeros((20, 50) )
__lowerCAmelCase = 1
__lowerCAmelCase = 1
__lowerCAmelCase = 1
__lowerCAmelCase = binary_mask_to_rle(snake_case_ )
self.assertEqual(len(snake_case_ ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
def test_post_process_semantic_segmentation(self):
    image_processor = self.image_processing_class(
        num_labels=self.image_processing_tester.num_classes,
        max_seq_length=77,
        task_seq_length=77,
        class_info_file="ade20k_panoptic.json",
        num_text=self.image_processing_tester.num_text,
        repo_path="shi-labs/oneformer_demo",
    )
    outputs = self.image_processing_tester.get_fake_oneformer_outputs()
    segmentation = image_processor.post_process_semantic_segmentation(outputs)

    self.assertEqual(len(segmentation), self.image_processing_tester.batch_size)
    self.assertEqual(
        segmentation[0].shape,
        (self.image_processing_tester.height, self.image_processing_tester.width),
    )

    target_sizes = [(1, 4) for i in range(self.image_processing_tester.batch_size)]
    segmentation = image_processor.post_process_semantic_segmentation(outputs, target_sizes=target_sizes)
    self.assertEqual(segmentation[0].shape, target_sizes[0])
def A__ ( self ) -> Union[str, Any]:
image_processor = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
__lowerCAmelCase = self.image_processing_tester.get_fake_oneformer_outputs()
__lowerCAmelCase = image_processor.post_process_instance_segmentation(snake_case_ , threshold=0 )
self.assertTrue(len(snake_case_ ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("""segmentation""" in el )
self.assertTrue("""segments_info""" in el )
self.assertEqual(type(el["""segments_info"""] ) , snake_case_ )
self.assertEqual(
el["""segmentation"""].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def A__ ( self ) -> Union[str, Any]:
image_processor = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
__lowerCAmelCase = self.image_processing_tester.get_fake_oneformer_outputs()
__lowerCAmelCase = image_processor.post_process_panoptic_segmentation(snake_case_ , threshold=0 )
self.assertTrue(len(snake_case_ ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("""segmentation""" in el )
self.assertTrue("""segments_info""" in el )
self.assertEqual(type(el["""segments_info"""] ) , snake_case_ )
self.assertEqual(
el["""segmentation"""].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
| 301
| 1
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowerCAmelCase_ ( A__ , unittest.TestCase ):
'''simple docstring'''
_snake_case = KandinskyInpaintPipeline
_snake_case = ['''prompt''', '''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image''']
_snake_case = [
'''prompt''',
'''negative_prompt''',
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
'''mask_image''',
]
_snake_case = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''negative_prompt''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
_snake_case = False
@property
def A__ ( self ) -> Optional[int]:
return 32
@property
def A__ ( self ) -> Optional[Any]:
return 32
@property
def A__ ( self ) -> Any:
return self.time_input_dim
@property
def A__ ( self ) -> Dict:
return self.time_input_dim * 4
@property
def A__ ( self ) -> Union[str, Any]:
return 100
@property
def A__ ( self ) -> Any:
__lowerCAmelCase = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
def A__ ( self ) -> List[Any]:
torch.manual_seed(0 )
__lowerCAmelCase = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_005 , )
__lowerCAmelCase = MultilingualCLIP(snake_case_ )
__lowerCAmelCase = text_encoder.eval()
return text_encoder
@property
def A__ ( self ) -> Dict:
torch.manual_seed(0 )
__lowerCAmelCase = {
"""in_channels""": 9,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
model = UNet2DConditionModel(**snake_case_ )
return model
@property
def A__ ( self ) -> Any:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def A__ ( self ) -> List[Any]:
torch.manual_seed(0 )
__lowerCAmelCase = VQModel(**self.dummy_movq_kwargs )
return model
def A__ ( self ) -> List[Any]:
__lowerCAmelCase = self.dummy_text_encoder
__lowerCAmelCase = self.dummy_tokenizer
__lowerCAmelCase = self.dummy_unet
__lowerCAmelCase = self.dummy_movq
__lowerCAmelCase = DDIMScheduler(
num_train_timesteps=1_000 , beta_schedule="""linear""" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=snake_case_ , set_alpha_to_one=snake_case_ , steps_offset=1 , prediction_type="""epsilon""" , thresholding=snake_case_ , )
__lowerCAmelCase = {
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def A__ ( self , snake_case_ , snake_case_=0 ) -> Tuple:
__lowerCAmelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(snake_case_ ) ).to(snake_case_ )
__lowerCAmelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(snake_case_ )
# create init_image
__lowerCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(snake_case_ ) ).to(snake_case_ )
__lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
init_image = Image.fromarray(np.uint8(snake_case_ ) ).convert("""RGB""" ).resize((256, 256) )
# create mask
mask = np.ones((64, 64) , dtype=np.float32 )
__lowerCAmelCase = 0
if str(snake_case_ ).startswith("""mps""" ):
__lowerCAmelCase = torch.manual_seed(snake_case_ )
else:
__lowerCAmelCase = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ )
__lowerCAmelCase = {
"""prompt""": """horse""",
"""image""": init_image,
"""mask_image""": mask,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 2,
"""guidance_scale""": 4.0,
"""output_type""": """np""",
}
return inputs
def A__ ( self ) -> Optional[int]:
__lowerCAmelCase = """cpu"""
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = self.pipeline_class(**snake_case_ )
__lowerCAmelCase = pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
__lowerCAmelCase = pipe(**self.get_dummy_inputs(snake_case_ ) )
__lowerCAmelCase = output.images
__lowerCAmelCase = pipe(
**self.get_dummy_inputs(snake_case_ ) , return_dict=snake_case_ , )[0]
__lowerCAmelCase = image[0, -3:, -3:, -1]
__lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1]
print(f"""image.shape {image.shape}""" )
assert image.shape == (1, 64, 64, 3)
__lowerCAmelCase = np.array(
[0.8_326_919, 0.73_790_467, 0.20_918_581, 0.9_309_612, 0.5_511_791, 0.43_713_328, 0.5_513_321, 0.49_922_934, 0.59_497_786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
def A__ ( self ) -> int:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self ) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ) -> Optional[int]:
__lowerCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy""" )
__lowerCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
__lowerCAmelCase = np.ones((768, 768) , dtype=np.float32 )
__lowerCAmelCase = 0
__lowerCAmelCase = """a hat"""
pipe_prior = KandinskyPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.float16 )
pipe_prior.to(snake_case_ )
pipeline = KandinskyInpaintPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-inpaint""" , torch_dtype=torch.float16 )
__lowerCAmelCase = pipeline.to(snake_case_ )
pipeline.set_progress_bar_config(disable=snake_case_ )
__lowerCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 )
__lowerCAmelCase , __lowerCAmelCase = pipe_prior(
snake_case_ , generator=snake_case_ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
__lowerCAmelCase = pipeline(
snake_case_ , image=snake_case_ , mask_image=snake_case_ , image_embeds=snake_case_ , negative_image_embeds=snake_case_ , generator=snake_case_ , num_inference_steps=100 , height=768 , width=768 , output_type="""np""" , )
__lowerCAmelCase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(snake_case_ , snake_case_ )
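# Hedged usage sketch (added; not part of the test file). It mirrors the slow test above
# outside unittest: the prior pipeline turns a prompt into image embeddings, which the
# inpaint pipeline consumes together with an init image and a mask. Checkpoint ids, the
# image URL and the mask shape are taken from the test; the device is assumed to be CUDA,
# and which mask value marks the repainted region depends on the installed diffusers version.
import numpy as np
import torch
from diffusers import KandinskyInpaintPipeline, KandinskyPriorPipeline
from diffusers.utils import load_image

def kandinsky_inpaint_demo(prompt="a hat", device="cuda"):
    init_image = load_image(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/cat.png"
    )
    mask = np.ones((768, 768), dtype=np.float32)  # the test zeroes out a sub-region of this mask

    pipe_prior = KandinskyPriorPipeline.from_pretrained(
        "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
    ).to(device)
    pipeline = KandinskyInpaintPipeline.from_pretrained(
        "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
    ).to(device)

    image_embeds, negative_image_embeds = pipe_prior(prompt, negative_prompt="").to_tuple()
    return pipeline(
        prompt,
        image=init_image,
        mask_image=mask,
        image_embeds=image_embeds,
        negative_image_embeds=negative_image_embeds,
        height=768,
        width=768,
        output_type="np",
    ).images[0]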
| 301
|
"""simple docstring"""
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Base model mapping
('''albert''', '''FlaxAlbertModel'''),
('''bart''', '''FlaxBartModel'''),
('''beit''', '''FlaxBeitModel'''),
('''bert''', '''FlaxBertModel'''),
('''big_bird''', '''FlaxBigBirdModel'''),
('''blenderbot''', '''FlaxBlenderbotModel'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallModel'''),
('''clip''', '''FlaxCLIPModel'''),
('''distilbert''', '''FlaxDistilBertModel'''),
('''electra''', '''FlaxElectraModel'''),
('''gpt-sw3''', '''FlaxGPT2Model'''),
('''gpt2''', '''FlaxGPT2Model'''),
('''gpt_neo''', '''FlaxGPTNeoModel'''),
('''gptj''', '''FlaxGPTJModel'''),
('''longt5''', '''FlaxLongT5Model'''),
('''marian''', '''FlaxMarianModel'''),
('''mbart''', '''FlaxMBartModel'''),
('''mt5''', '''FlaxMT5Model'''),
('''opt''', '''FlaxOPTModel'''),
('''pegasus''', '''FlaxPegasusModel'''),
('''regnet''', '''FlaxRegNetModel'''),
('''resnet''', '''FlaxResNetModel'''),
('''roberta''', '''FlaxRobertaModel'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormModel'''),
('''roformer''', '''FlaxRoFormerModel'''),
('''t5''', '''FlaxT5Model'''),
('''vision-text-dual-encoder''', '''FlaxVisionTextDualEncoderModel'''),
('''vit''', '''FlaxViTModel'''),
('''wav2vec2''', '''FlaxWav2Vec2Model'''),
('''whisper''', '''FlaxWhisperModel'''),
('''xglm''', '''FlaxXGLMModel'''),
('''xlm-roberta''', '''FlaxXLMRobertaModel'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for pre-training mapping
('''albert''', '''FlaxAlbertForPreTraining'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForPreTraining'''),
('''big_bird''', '''FlaxBigBirdForPreTraining'''),
('''electra''', '''FlaxElectraForPreTraining'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
('''wav2vec2''', '''FlaxWav2Vec2ForPreTraining'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Masked LM mapping
('''albert''', '''FlaxAlbertForMaskedLM'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForMaskedLM'''),
('''big_bird''', '''FlaxBigBirdForMaskedLM'''),
('''distilbert''', '''FlaxDistilBertForMaskedLM'''),
('''electra''', '''FlaxElectraForMaskedLM'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''blenderbot''', '''FlaxBlenderbotForConditionalGeneration'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallForConditionalGeneration'''),
('''encoder-decoder''', '''FlaxEncoderDecoderModel'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''marian''', '''FlaxMarianMTModel'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''pegasus''', '''FlaxPegasusForConditionalGeneration'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Image-classsification
('''beit''', '''FlaxBeitForImageClassification'''),
('''regnet''', '''FlaxRegNetForImageClassification'''),
('''resnet''', '''FlaxResNetForImageClassification'''),
('''vit''', '''FlaxViTForImageClassification'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
('''vision-encoder-decoder''', '''FlaxVisionEncoderDecoderModel'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Causal LM mapping
('''bart''', '''FlaxBartForCausalLM'''),
('''bert''', '''FlaxBertForCausalLM'''),
('''big_bird''', '''FlaxBigBirdForCausalLM'''),
('''electra''', '''FlaxElectraForCausalLM'''),
('''gpt-sw3''', '''FlaxGPT2LMHeadModel'''),
('''gpt2''', '''FlaxGPT2LMHeadModel'''),
('''gpt_neo''', '''FlaxGPTNeoForCausalLM'''),
('''gptj''', '''FlaxGPTJForCausalLM'''),
('''opt''', '''FlaxOPTForCausalLM'''),
('''roberta''', '''FlaxRobertaForCausalLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForCausalLM'''),
('''xglm''', '''FlaxXGLMForCausalLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForCausalLM'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Sequence Classification mapping
('''albert''', '''FlaxAlbertForSequenceClassification'''),
('''bart''', '''FlaxBartForSequenceClassification'''),
('''bert''', '''FlaxBertForSequenceClassification'''),
('''big_bird''', '''FlaxBigBirdForSequenceClassification'''),
('''distilbert''', '''FlaxDistilBertForSequenceClassification'''),
('''electra''', '''FlaxElectraForSequenceClassification'''),
('''mbart''', '''FlaxMBartForSequenceClassification'''),
('''roberta''', '''FlaxRobertaForSequenceClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForSequenceClassification'''),
('''roformer''', '''FlaxRoFormerForSequenceClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForSequenceClassification'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Question Answering mapping
('''albert''', '''FlaxAlbertForQuestionAnswering'''),
('''bart''', '''FlaxBartForQuestionAnswering'''),
('''bert''', '''FlaxBertForQuestionAnswering'''),
('''big_bird''', '''FlaxBigBirdForQuestionAnswering'''),
('''distilbert''', '''FlaxDistilBertForQuestionAnswering'''),
('''electra''', '''FlaxElectraForQuestionAnswering'''),
('''mbart''', '''FlaxMBartForQuestionAnswering'''),
('''roberta''', '''FlaxRobertaForQuestionAnswering'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForQuestionAnswering'''),
('''roformer''', '''FlaxRoFormerForQuestionAnswering'''),
('''xlm-roberta''', '''FlaxXLMRobertaForQuestionAnswering'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Token Classification mapping
('''albert''', '''FlaxAlbertForTokenClassification'''),
('''bert''', '''FlaxBertForTokenClassification'''),
('''big_bird''', '''FlaxBigBirdForTokenClassification'''),
('''distilbert''', '''FlaxDistilBertForTokenClassification'''),
('''electra''', '''FlaxElectraForTokenClassification'''),
('''roberta''', '''FlaxRobertaForTokenClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForTokenClassification'''),
('''roformer''', '''FlaxRoFormerForTokenClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForTokenClassification'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Multiple Choice mapping
('''albert''', '''FlaxAlbertForMultipleChoice'''),
('''bert''', '''FlaxBertForMultipleChoice'''),
('''big_bird''', '''FlaxBigBirdForMultipleChoice'''),
('''distilbert''', '''FlaxDistilBertForMultipleChoice'''),
('''electra''', '''FlaxElectraForMultipleChoice'''),
('''roberta''', '''FlaxRobertaForMultipleChoice'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMultipleChoice'''),
('''roformer''', '''FlaxRoFormerForMultipleChoice'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMultipleChoice'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
('''bert''', '''FlaxBertForNextSentencePrediction'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
('''speech-encoder-decoder''', '''FlaxSpeechEncoderDecoderModel'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
('''whisper''', '''FlaxWhisperForAudioClassification'''),
]
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModel)
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_PRETRAINING_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForPreTraining, head_doc='''pretraining''')
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForCausalLM, head_doc='''causal language modeling''')
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_MASKED_LM_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='''masked language modeling''')
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc='''sequence-to-sequence language modeling''', checkpoint_for_example='''t5-base'''
)
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc='''sequence classification'''
)
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='''question answering''')
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc='''token classification'''
)
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='''multiple choice''')
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc='''next sentence prediction'''
)
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(
FlaxAutoModelForImageClassification, head_doc='''image classification'''
)
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='''vision-to-text modeling''')
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc='''sequence-to-sequence speech-to-text modeling'''
)
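# Hedged usage sketch (added; not part of the module). The auto classes defined above
# resolve the concrete Flax architecture from a checkpoint's config via the mappings,
# so callers never name the architecture directly. The checkpoint id is only an example
# and requires network access.
from transformers import FlaxAutoModel

flax_model = FlaxAutoModel.from_pretrained("bert-base-cased")  # resolves to FlaxBertModel through FLAX_MODEL_MAPPING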
| 301
| 1
|
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCAmelCase_ ( A__ ):
'''simple docstring'''
_snake_case = ['''image_processor''', '''tokenizer''']
_snake_case = '''BlipImageProcessor'''
_snake_case = ('''BertTokenizer''', '''BertTokenizerFast''')
def __init__( self , snake_case_ , snake_case_ ) -> Tuple:
__lowerCAmelCase = False
super().__init__(snake_case_ , snake_case_ )
__lowerCAmelCase = self.image_processor
def __call__( self , snake_case_ = None , snake_case_ = None , snake_case_ = True , snake_case_ = False , snake_case_ = None , snake_case_ = None , snake_case_ = 0 , snake_case_ = None , snake_case_ = None , snake_case_ = False , snake_case_ = False , snake_case_ = False , snake_case_ = False , snake_case_ = False , snake_case_ = True , snake_case_ = None , **snake_case_ , ) -> BatchEncoding:
if images is None and text is None:
raise ValueError("""You have to specify either images or text.""" )
# Get only text
if images is None:
__lowerCAmelCase = self.tokenizer
__lowerCAmelCase = self.tokenizer(
text=snake_case_ , add_special_tokens=snake_case_ , padding=snake_case_ , truncation=snake_case_ , max_length=snake_case_ , stride=snake_case_ , pad_to_multiple_of=snake_case_ , return_attention_mask=snake_case_ , return_overflowing_tokens=snake_case_ , return_special_tokens_mask=snake_case_ , return_offsets_mapping=snake_case_ , return_token_type_ids=snake_case_ , return_length=snake_case_ , verbose=snake_case_ , return_tensors=snake_case_ , **snake_case_ , )
return text_encoding
# add pixel_values
__lowerCAmelCase = self.image_processor(snake_case_ , return_tensors=snake_case_ )
if text is not None:
__lowerCAmelCase = self.tokenizer(
text=snake_case_ , add_special_tokens=snake_case_ , padding=snake_case_ , truncation=snake_case_ , max_length=snake_case_ , stride=snake_case_ , pad_to_multiple_of=snake_case_ , return_attention_mask=snake_case_ , return_overflowing_tokens=snake_case_ , return_special_tokens_mask=snake_case_ , return_offsets_mapping=snake_case_ , return_token_type_ids=snake_case_ , return_length=snake_case_ , verbose=snake_case_ , return_tensors=snake_case_ , **snake_case_ , )
else:
__lowerCAmelCase = None
if text_encoding is not None:
encoding_image_processor.update(snake_case_ )
return encoding_image_processor
def A__ ( self , *snake_case_ , **snake_case_ ) -> Optional[int]:
return self.tokenizer.batch_decode(*snake_case_ , **snake_case_ )
def A__ ( self , *snake_case_ , **snake_case_ ) -> int:
return self.tokenizer.decode(*snake_case_ , **snake_case_ )
@property
def A__ ( self ) -> Tuple:
__lowerCAmelCase = self.tokenizer.model_input_names
__lowerCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
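# Hedged usage sketch (added; not part of the module). The processor above bundles the
# image processor and tokenizer, so a single call produces pixel_values plus the text
# encoding. The checkpoint id is only an example; the exact keys depend on the tokenizer.
from PIL import Image
from transformers import BlipProcessor

blip_processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
demo_image = Image.new("RGB", (384, 384))
encoding = blip_processor(images=demo_image, text="a photo of", return_tensors="pt")
print(sorted(encoding.keys()))  # expected to include pixel_values, input_ids, attention_mask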
| 301
|
"""simple docstring"""
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return

    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)


if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
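# Quick sanity check (added as an illustration): the expected output follows directly
# from choosing 2 elements out of {1, 2, 3, 4} in lexicographic order.
assert generate_all_combinations(4, 2) == [
    [1, 2], [1, 3], [1, 4],
    [2, 3], [2, 4], [3, 4],
]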
| 301
| 1
|
"""simple docstring"""
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class lowerCAmelCase_ ( A__ ):
'''simple docstring'''
def A__ ( self ) -> Optional[Any]:
__lowerCAmelCase = tempfile.mkdtemp()
__lowerCAmelCase = 8
# DPR tok
__lowerCAmelCase = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
__lowerCAmelCase = os.path.join(self.tmpdirname , """dpr_tokenizer""" )
os.makedirs(snake_case_ , exist_ok=snake_case_ )
__lowerCAmelCase = os.path.join(snake_case_ , DPR_VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
# BART tok
__lowerCAmelCase = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
__lowerCAmelCase = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )
__lowerCAmelCase = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
__lowerCAmelCase = {"""unk_token""": """<unk>"""}
__lowerCAmelCase = os.path.join(self.tmpdirname , """bart_tokenizer""" )
os.makedirs(snake_case_ , exist_ok=snake_case_ )
__lowerCAmelCase = os.path.join(snake_case_ , BART_VOCAB_FILES_NAMES["""vocab_file"""] )
__lowerCAmelCase = os.path.join(snake_case_ , BART_VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(snake_case_ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(snake_case_ ) )
def A__ ( self ) -> DPRQuestionEncoderTokenizer:
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , """dpr_tokenizer""" ) )
def A__ ( self ) -> DPRContextEncoderTokenizer:
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , """dpr_tokenizer""" ) )
def A__ ( self ) -> BartTokenizer:
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , """bart_tokenizer""" ) )
def A__ ( self ) -> List[str]:
shutil.rmtree(self.tmpdirname )
def A__ ( self ) -> Any:
__lowerCAmelCase = Dataset.from_dict(
{
"""id""": ["""0""", """1"""],
"""text""": ["""foo""", """bar"""],
"""title""": ["""Foo""", """Bar"""],
"""embeddings""": [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index("""embeddings""" , string_factory="""Flat""" , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def A__ ( self ) -> Optional[Any]:
__lowerCAmelCase = self.get_dummy_dataset()
__lowerCAmelCase = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch("""transformers.models.rag.retrieval_rag.load_dataset""" ) as mock_load_dataset:
__lowerCAmelCase = dataset
__lowerCAmelCase = RagRetriever(
snake_case_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def A__ ( self , snake_case_ ) -> Optional[Any]:
__lowerCAmelCase = self.get_dummy_dataset()
__lowerCAmelCase = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="""custom""" , )
if from_disk:
__lowerCAmelCase = os.path.join(self.tmpdirname , """dataset""" )
__lowerCAmelCase = os.path.join(self.tmpdirname , """index.faiss""" )
dataset.get_index("""embeddings""" ).save(os.path.join(self.tmpdirname , """index.faiss""" ) )
dataset.drop_index("""embeddings""" )
dataset.save_to_disk(os.path.join(self.tmpdirname , """dataset""" ) )
del dataset
__lowerCAmelCase = RagRetriever(
snake_case_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
__lowerCAmelCase = RagRetriever(
snake_case_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , snake_case_ ) , )
return retriever
def A__ ( self ) -> List[Any]:
__lowerCAmelCase = Dataset.from_dict(
{
"""id""": ["""0""", """1"""],
"""text""": ["""foo""", """bar"""],
"""title""": ["""Foo""", """Bar"""],
"""embeddings""": [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index("""embeddings""" , string_factory="""Flat""" , metric_type=faiss.METRIC_INNER_PRODUCT )
__lowerCAmelCase = os.path.join(self.tmpdirname , """hf_bert_base.hnswSQ8_correct_phi_128.c_index""" )
dataset.save_faiss_index("""embeddings""" , index_file_name + """.index.dpr""" )
pickle.dump(dataset["""id"""] , open(index_file_name + """.index_meta.dpr""" , """wb""" ) )
__lowerCAmelCase = os.path.join(self.tmpdirname , """psgs_w100.tsv.pkl""" )
__lowerCAmelCase = {sample["""id"""]: [sample["""text"""], sample["""title"""]] for sample in dataset}
pickle.dump(snake_case_ , open(snake_case_ , """wb""" ) )
__lowerCAmelCase = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="""legacy""" , index_path=self.tmpdirname , )
__lowerCAmelCase = RagRetriever(
snake_case_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def A__ ( self ) -> Optional[int]:
__lowerCAmelCase = 1
__lowerCAmelCase = self.get_dummy_canonical_hf_index_retriever()
__lowerCAmelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = retriever.retrieve(snake_case_ , n_docs=snake_case_ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(snake_case_ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["""embeddings""", """id""", """text""", """title"""] )
self.assertEqual(len(doc_dicts[0]["""id"""] ) , snake_case_ )
self.assertEqual(doc_dicts[0]["""id"""][0] , """1""" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["""id"""][0] , """0""" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def A__ ( self ) -> List[str]:
__lowerCAmelCase = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch("""transformers.models.rag.retrieval_rag.load_dataset""" ) as mock_load_dataset:
__lowerCAmelCase = self.get_dummy_dataset()
retriever.save_pretrained(snake_case_ )
__lowerCAmelCase = RagRetriever.from_pretrained(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
__lowerCAmelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCAmelCase = retriever.retrieve(snake_case_ , n_docs=1 )
self.assertTrue(out is not None )
def A__ ( self ) -> Optional[Any]:
__lowerCAmelCase = 1
__lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=snake_case_ )
__lowerCAmelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = retriever.retrieve(snake_case_ , n_docs=snake_case_ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(snake_case_ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["""embeddings""", """id""", """text""", """title"""] )
self.assertEqual(len(doc_dicts[0]["""id"""] ) , snake_case_ )
self.assertEqual(doc_dicts[0]["""id"""][0] , """1""" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["""id"""][0] , """0""" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def A__ ( self ) -> str:
__lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=snake_case_ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(snake_case_ )
__lowerCAmelCase = RagRetriever.from_pretrained(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
__lowerCAmelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCAmelCase = retriever.retrieve(snake_case_ , n_docs=1 )
self.assertTrue(out is not None )
def A__ ( self ) -> List[Any]:
__lowerCAmelCase = 1
__lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=snake_case_ )
__lowerCAmelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = retriever.retrieve(snake_case_ , n_docs=snake_case_ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(snake_case_ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["""embeddings""", """id""", """text""", """title"""] )
self.assertEqual(len(doc_dicts[0]["""id"""] ) , snake_case_ )
self.assertEqual(doc_dicts[0]["""id"""][0] , """1""" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["""id"""][0] , """0""" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def A__ ( self ) -> List[Any]:
__lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=snake_case_ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(snake_case_ )
__lowerCAmelCase = RagRetriever.from_pretrained(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
__lowerCAmelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCAmelCase = retriever.retrieve(snake_case_ , n_docs=1 )
self.assertTrue(out is not None )
def A__ ( self ) -> Optional[int]:
__lowerCAmelCase = 1
__lowerCAmelCase = self.get_dummy_legacy_index_retriever()
__lowerCAmelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = retriever.retrieve(snake_case_ , n_docs=snake_case_ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(snake_case_ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["""text""", """title"""] )
self.assertEqual(len(doc_dicts[0]["""text"""] ) , snake_case_ )
self.assertEqual(doc_dicts[0]["""text"""][0] , """bar""" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["""text"""][0] , """foo""" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def A__ ( self ) -> List[Any]:
__lowerCAmelCase = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(snake_case_ )
__lowerCAmelCase = RagRetriever.from_pretrained(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
__lowerCAmelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCAmelCase = retriever.retrieve(snake_case_ , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def A__ ( self ) -> Dict:
import torch
__lowerCAmelCase = 1
__lowerCAmelCase = self.get_dummy_canonical_hf_index_retriever()
__lowerCAmelCase = [[5, 7], [10, 11]]
__lowerCAmelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCAmelCase = retriever(snake_case_ , snake_case_ , prefix=retriever.config.generator.prefix , n_docs=snake_case_ )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = (
out["""context_input_ids"""],
out["""context_attention_mask"""],
out["""retrieved_doc_embeds"""],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(snake_case_ , snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
self.assertIsInstance(snake_case_ , np.ndarray )
__lowerCAmelCase = retriever(
snake_case_ , snake_case_ , prefix=retriever.config.generator.prefix , n_docs=snake_case_ , return_tensors="""pt""" , )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = ( # noqa: F841
out["""context_input_ids"""],
out["""context_attention_mask"""],
out["""retrieved_doc_embeds"""],
out["""doc_ids"""],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(snake_case_ , torch.Tensor )
self.assertIsInstance(snake_case_ , torch.Tensor )
self.assertIsInstance(snake_case_ , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def A__ ( self ) -> Union[str, Any]:
__lowerCAmelCase = self.get_dpr_ctx_encoder_tokenizer()
__lowerCAmelCase = 1
__lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=snake_case_ )
retriever.set_ctx_encoder_tokenizer(snake_case_ )
__lowerCAmelCase = [[5, 7], [10, 11]]
__lowerCAmelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCAmelCase = retriever(snake_case_ , snake_case_ , prefix=retriever.config.generator.prefix , n_docs=snake_case_ )
self.assertEqual(
len(snake_case_ ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ("""tokenized_doc_ids""", """tokenized_doc_attention_mask""") ) , snake_case_ ) # check for doc token related keys in dictionary.
| 301
|
"""simple docstring"""
import os
from pathlib import Path
def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
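# Hedged usage sketch (added; not part of the module). The loader JIT-compiles the custom
# CUDA op on first use, so callers usually wrap it and fall back to a pure PyTorch path
# when no CUDA toolchain is available.
try:
    MSDA = load_cuda_kernels()
except Exception:
    MSDA = None  # fall back to the native PyTorch attention implementation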
| 301
| 1
|
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowerCAmelCase_ ( A__ ):
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ = None , ) -> Dict:
super().__init__()
self.register_modules(transformer=snake_case_ , vae=snake_case_ , scheduler=snake_case_ )
# create an ImageNet label -> id dictionary for easier use
__lowerCAmelCase = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split(""",""" ):
__lowerCAmelCase = int(snake_case_ )
__lowerCAmelCase = dict(sorted(self.labels.items() ) )
def A__ ( self , snake_case_ ) -> List[int]:
if not isinstance(snake_case_ , snake_case_ ):
__lowerCAmelCase = list(snake_case_ )
for l in label:
if l not in self.labels:
raise ValueError(
f"""{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.""" )
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self , snake_case_ , snake_case_ = 4.0 , snake_case_ = None , snake_case_ = 50 , snake_case_ = "pil" , snake_case_ = True , ) -> Union[ImagePipelineOutput, Tuple]:
__lowerCAmelCase = len(snake_case_ )
__lowerCAmelCase = self.transformer.config.sample_size
__lowerCAmelCase = self.transformer.config.in_channels
__lowerCAmelCase = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=snake_case_ , device=self.device , dtype=self.transformer.dtype , )
__lowerCAmelCase = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
__lowerCAmelCase = torch.tensor(snake_case_ , device=self.device ).reshape(-1 )
__lowerCAmelCase = torch.tensor([1_000] * batch_size , device=self.device )
__lowerCAmelCase = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(snake_case_ )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
__lowerCAmelCase = latent_model_input[: len(snake_case_ ) // 2]
__lowerCAmelCase = torch.cat([half, half] , dim=0 )
__lowerCAmelCase = self.scheduler.scale_model_input(snake_case_ , snake_case_ )
__lowerCAmelCase = t
if not torch.is_tensor(snake_case_ ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
__lowerCAmelCase = latent_model_input.device.type == """mps"""
if isinstance(snake_case_ , snake_case_ ):
__lowerCAmelCase = torch.floataa if is_mps else torch.floataa
else:
__lowerCAmelCase = torch.intaa if is_mps else torch.intaa
__lowerCAmelCase = torch.tensor([timesteps] , dtype=snake_case_ , device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
__lowerCAmelCase = timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__lowerCAmelCase = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
__lowerCAmelCase = self.transformer(
snake_case_ , timestep=snake_case_ , class_labels=snake_case_ ).sample
# perform guidance
if guidance_scale > 1:
__lowerCAmelCase , __lowerCAmelCase = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
__lowerCAmelCase , __lowerCAmelCase = torch.split(snake_case_ , len(snake_case_ ) // 2 , dim=0 )
__lowerCAmelCase = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
__lowerCAmelCase = torch.cat([half_eps, half_eps] , dim=0 )
__lowerCAmelCase = torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
__lowerCAmelCase , __lowerCAmelCase = torch.split(snake_case_ , snake_case_ , dim=1 )
else:
__lowerCAmelCase = noise_pred
# compute previous image: x_t -> x_t-1
__lowerCAmelCase = self.scheduler.step(snake_case_ , snake_case_ , snake_case_ ).prev_sample
if guidance_scale > 1:
__lowerCAmelCase , __lowerCAmelCase = latent_model_input.chunk(2 , dim=0 )
else:
__lowerCAmelCase = latent_model_input
__lowerCAmelCase = 1 / self.vae.config.scaling_factor * latents
__lowerCAmelCase = self.vae.decode(snake_case_ ).sample
__lowerCAmelCase = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__lowerCAmelCase = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__lowerCAmelCase = self.numpy_to_pil(snake_case_ )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=snake_case_ )
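# Illustrative usage sketch only (not part of the pipeline class above). It assumes pretrained
# `transformer`, `vae` and `scheduler` objects plus an id -> label mapping are already available;
# the class indices 207 and 360 are example ImageNet ids, not values taken from this file.
def _run_dit_example(transformer, vae, scheduler, idalabel, device="cuda"):
    pipe = lowerCAmelCase_(transformer, vae, scheduler, idalabel).to(device)
    # positional arguments mirror __call__ above: class labels, guidance scale, generator,
    # number of inference steps, output type, return_dict
    output = pipe([207, 360], 4.0, None, 25, "pil", True)
    return output.images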
| 301
|
"""simple docstring"""
from __future__ import annotations
from statistics import mean
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = [0] * no_of_processes
__lowerCAmelCase = [0] * no_of_processes
# Initialize remaining_time to waiting_time.
for i in range(_lowerCAmelCase ):
__lowerCAmelCase = burst_time[i]
__lowerCAmelCase = []
__lowerCAmelCase = 0
__lowerCAmelCase = 0
# While not all processes are completed:
# a process whose arrival time has passed and which still has remaining
# execution time is put into ready_process; the shortest job in
# ready_process (target_process) is then run to completion.
while completed != no_of_processes:
__lowerCAmelCase = []
__lowerCAmelCase = -1
for i in range(_lowerCAmelCase ):
if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
ready_process.append(_lowerCAmelCase )
if len(_lowerCAmelCase ) > 0:
__lowerCAmelCase = ready_process[0]
for i in ready_process:
if remaining_time[i] < remaining_time[target_process]:
__lowerCAmelCase = i
total_time += burst_time[target_process]
completed += 1
__lowerCAmelCase = 0
__lowerCAmelCase = (
total_time - arrival_time[target_process] - burst_time[target_process]
)
else:
total_time += 1
return waiting_time
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = [0] * no_of_processes
for i in range(_lowerCAmelCase ):
__lowerCAmelCase = burst_time[i] + waiting_time[i]
return turn_around_time
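# Self-contained worked example (values assumed): with burst times [3, 1, 2] and simultaneous
# arrivals, the shortest job runs first, so the per-process waiting times are [3, 0, 1].
def _sjf_waiting_times_reference(burst):
    order = sorted(range(len(burst)), key=lambda i: burst[i])
    wait, elapsed = [0] * len(burst), 0
    for i in order:
        wait[i] = elapsed
        elapsed += burst[i]
    return wait
assert _sjf_waiting_times_reference([3, 1, 2]) == [3, 0, 1]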
if __name__ == "__main__":
print('''[TEST CASE 01]''')
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = [2, 5, 3, 7]
SCREAMING_SNAKE_CASE_ = [0, 0, 0, 0]
SCREAMING_SNAKE_CASE_ = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
SCREAMING_SNAKE_CASE_ = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print('''PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time''')
for i, process_id in enumerate(list(range(1, 5))):
print(
F"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"
F"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"
)
print(F"\nAverage waiting time = {mean(waiting_time):.5f}")
print(F"Average turnaround time = {mean(turn_around_time):.5f}")
| 301
| 1
|
"""simple docstring"""
def lowercase (_lowerCAmelCase , _lowerCAmelCase ):
return x if y == 0 else greatest_common_divisor(_lowerCAmelCase , x % y )
def lowercase (_lowerCAmelCase , _lowerCAmelCase ):
return (x * y) // greatest_common_divisor(_lowerCAmelCase , _lowerCAmelCase )
def lowercase (_lowerCAmelCase = 20 ):
__lowerCAmelCase = 1
for i in range(1 , n + 1 ):
__lowerCAmelCase = lcm(_lowerCAmelCase , _lowerCAmelCase )
return g
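# Self-contained sanity check (illustrative): the smallest number evenly divisible by every
# integer from 1 to 10 is 2520, and from 1 to 20 it is 232792560.
def _lcm_reference(n):
    from math import gcd
    acc = 1
    for k in range(2, n + 1):
        acc = acc * k // gcd(acc, k)
    return acc
assert _lcm_reference(10) == 2_520
assert _lcm_reference(20) == 232_792_560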
if __name__ == "__main__":
print(F"{solution() = }")
| 301
|
"""simple docstring"""
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE_ = get_tests_dir('''fixtures/spiece.model''')
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( A__ , unittest.TestCase ):
'''simple docstring'''
_snake_case = DebertaVaTokenizer
_snake_case = DebertaVaTokenizerFast
_snake_case = True
_snake_case = True
def A__ ( self ) -> Optional[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , unk_token="""<unk>""" )
tokenizer.save_pretrained(self.tmpdirname )
def A__ ( self , snake_case_ ) -> List[Any]:
__lowerCAmelCase = """this is a test"""
__lowerCAmelCase = """this is a test"""
return input_text, output_text
def A__ ( self ) -> Optional[Any]:
__lowerCAmelCase = """<pad>"""
__lowerCAmelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ )
def A__ ( self ) -> Any:
__lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """[PAD]""" )
self.assertEqual(len(snake_case_ ) , 30_001 )
def A__ ( self ) -> Optional[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 30_000 )
def A__ ( self ) -> int:
# fmt: off
__lowerCAmelCase = """ \tHeLLo!how \n Are yoU? """
__lowerCAmelCase = ["""▁hello""", """!""", """how""", """▁are""", """▁you""", """?"""]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , do_lower_case=snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = DebertaVaTokenizerFast(snake_case_ , do_lower_case=snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
@unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" )
def A__ ( self ) -> int:
pass
@unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" )
def A__ ( self ) -> Dict:
pass
def A__ ( self ) -> List[str]:
# fmt: off
__lowerCAmelCase = """I was born in 92000, and this is falsé."""
__lowerCAmelCase = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = DebertaVaTokenizerFast(snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
def A__ ( self ) -> Dict:
# fmt: off
__lowerCAmelCase = """I was born in 92000, and this is falsé."""
__lowerCAmelCase = ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = DebertaVaTokenizerFast(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
def A__ ( self ) -> Any:
# fmt: off
__lowerCAmelCase = """I was born in 92000, and this is falsé."""
__lowerCAmelCase = ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """.""", ]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = DebertaVaTokenizerFast(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
def A__ ( self ) -> Tuple:
# fmt: off
__lowerCAmelCase = """I was born in 92000, and this is falsé."""
__lowerCAmelCase = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = DebertaVaTokenizerFast(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
def A__ ( self ) -> Any:
# fmt: off
__lowerCAmelCase = """ \tHeLLo!how \n Are yoU? """
__lowerCAmelCase = ["""▁""", """<unk>""", """e""", """<unk>""", """o""", """!""", """how""", """▁""", """<unk>""", """re""", """▁yo""", """<unk>""", """?"""]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = DebertaVaTokenizerFast(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
def A__ ( self ) -> int:
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = self.get_rust_tokenizer()
__lowerCAmelCase = """I was born in 92000, and this is falsé."""
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
__lowerCAmelCase = rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = self.get_rust_tokenizer()
__lowerCAmelCase = tokenizer.encode(snake_case_ )
__lowerCAmelCase = rust_tokenizer.encode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
def A__ ( self ) -> str:
__lowerCAmelCase = """This is a test"""
__lowerCAmelCase = [13, 1, 4_398, 25, 21, 1_289]
__lowerCAmelCase = ["""▁""", """T""", """his""", """▁is""", """▁a""", """▁test"""]
__lowerCAmelCase = ["""▁""", """<unk>""", """his""", """▁is""", """▁a""", """▁test"""]
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , keep_accents=snake_case_ )
__lowerCAmelCase = DebertaVaTokenizerFast(snake_case_ , keep_accents=snake_case_ )
__lowerCAmelCase = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = rust_tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
# fmt: off
__lowerCAmelCase = """I was born in 92000, and this is falsé."""
__lowerCAmelCase = [13, 1, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9]
__lowerCAmelCase = ["""▁""", """I""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """.""", ]
__lowerCAmelCase = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """.""", ]
# fmt: on
__lowerCAmelCase = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = rust_tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
def A__ ( self ) -> Optional[int]:
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ )
__lowerCAmelCase = tokenizer.encode("""sequence builders""" )
__lowerCAmelCase = tokenizer.encode("""multi-sequence build""" )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(snake_case_ )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(snake_case_ , snake_case_ )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , snake_case_ )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , snake_case_ , )
@slow
def A__ ( self ) -> int:
# fmt: off
__lowerCAmelCase = {"""input_ids""": [[1, 39_867, 36, 19_390, 486, 27, 35_052, 81_436, 18, 60_685, 1_225, 7, 35_052, 81_436, 18, 9_367, 16_899, 18, 15_937, 53, 594, 773, 18, 16_287, 30_465, 36, 15_937, 6, 41_139, 38, 36_979, 60_763, 191, 6, 34_132, 99, 6, 50_538, 390, 43_230, 6, 34_132, 2_779, 20_850, 14, 699, 1_072, 1_194, 36, 382, 10_901, 53, 7, 699, 1_072, 2_084, 36, 20_422, 630, 53, 19, 105, 3_049, 1_896, 1_053, 16_899, 1_506, 11, 37_978, 4_243, 7, 1_237, 31_869, 200, 16_566, 654, 6, 35_052, 81_436, 7, 55_630, 13_593, 4, 2], [1, 26, 15_011, 13, 667, 8, 1_053, 18, 23_611, 1_237, 72_356, 12_820, 34, 104_134, 1_209, 35, 13_313, 6_627, 21, 202, 347, 7, 164, 2_399, 11, 46, 4_485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_232, 2_864, 15_785, 14_951, 105, 5, 8_581, 1_250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case_ , model_name="""microsoft/deberta-v2-xlarge""" , revision="""ad6e42c1532ddf3a15c39246b63f5559d558b670""" , )
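# Illustrative usage sketch (the checkpoint id is the same microsoft/deberta-v2-xlarge used in the
# integration test above; the sentence is made up): load the fast tokenizer and round-trip a string.
def _deberta_tokenizer_example():
    tokenizer = DebertaVaTokenizerFast.from_pretrained("microsoft/deberta-v2-xlarge")
    ids = tokenizer("I was born in 92000, and this is falsé.").input_ids
    return tokenizer.decode(ids)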
| 301
| 1
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCAmelCase_ ( A__ ):
'''simple docstring'''
_snake_case = ['''image_processor''', '''tokenizer''']
_snake_case = '''ViTImageProcessor'''
_snake_case = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
def __init__( self , snake_case_=None , snake_case_=None , **snake_case_ ) -> Tuple:
__lowerCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , snake_case_ , )
__lowerCAmelCase = kwargs.pop("""feature_extractor""" )
__lowerCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(snake_case_ , snake_case_ )
def __call__( self , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , **snake_case_ ) -> Optional[Any]:
if text is None and visual_prompt is None and images is None:
raise ValueError("""You have to specify either text, visual prompt or images.""" )
if text is not None and visual_prompt is not None:
raise ValueError("""You have to specify exactly one type of prompt. Either text or visual prompt.""" )
if text is not None:
__lowerCAmelCase = self.tokenizer(snake_case_ , return_tensors=snake_case_ , **snake_case_ )
if visual_prompt is not None:
__lowerCAmelCase = self.image_processor(snake_case_ , return_tensors=snake_case_ , **snake_case_ )
if images is not None:
__lowerCAmelCase = self.image_processor(snake_case_ , return_tensors=snake_case_ , **snake_case_ )
if visual_prompt is not None and images is not None:
__lowerCAmelCase = {
"""pixel_values""": image_features.pixel_values,
"""conditional_pixel_values""": prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
__lowerCAmelCase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
__lowerCAmelCase = {
"""conditional_pixel_values""": prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**snake_case_ ) , tensor_type=snake_case_ )
def A__ ( self , *snake_case_ , **snake_case_ ) -> Tuple:
return self.tokenizer.batch_decode(*snake_case_ , **snake_case_ )
def A__ ( self , *snake_case_ , **snake_case_ ) -> Any:
return self.tokenizer.decode(*snake_case_ , **snake_case_ )
@property
def A__ ( self ) -> List[Any]:
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , snake_case_ , )
return self.image_processor_class
@property
def A__ ( self ) -> List[Any]:
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , snake_case_ , )
return self.image_processor
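# Illustrative usage sketch (the checkpoint id and prompts are assumptions, not taken from this
# file); only the text path of __call__ above is exercised, since the remaining parameter names
# cannot be recovered from this snippet.
def _processor_text_only_example():
    processor = lowerCAmelCase_.from_pretrained("CIDAS/clipseg-rd64-refined")  # assumed checkpoint
    return processor(["a photo of a cat", "a photo of a dog"])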
| 301
|
"""simple docstring"""
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
SCREAMING_SNAKE_CASE_ = open # noqa: we just need to have a builtin inside this module to test it properly
| 301
| 1
|
"""simple docstring"""
from __future__ import annotations
def lowercase (_lowerCAmelCase ):
__lowerCAmelCase = len(_lowerCAmelCase )
# Create a solutions matrix to record the path.
__lowerCAmelCase = [[0 for _ in range(_lowerCAmelCase )] for _ in range(_lowerCAmelCase )]
__lowerCAmelCase = run_maze(_lowerCAmelCase , 0 , 0 , _lowerCAmelCase )
if solved:
print("""\n""".join(str(_lowerCAmelCase ) for row in solutions ) )
else:
print("""No solution exists!""" )
return solved
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = len(_lowerCAmelCase )
# Final check point.
if i == j == (size - 1):
__lowerCAmelCase = 1
return True
__lowerCAmelCase = (not i < 0) and (not j < 0) # Check lower bounds
__lowerCAmelCase = (i < size) and (j < size) # Check upper bounds
if lower_flag and upper_flag:
# check for already-visited and blocked cells.
__lowerCAmelCase = (not solutions[i][j]) and (not maze[i][j])
if block_flag:
# check visited
__lowerCAmelCase = 1
# check for directions
if (
run_maze(_lowerCAmelCase , i + 1 , _lowerCAmelCase , _lowerCAmelCase )
or run_maze(_lowerCAmelCase , _lowerCAmelCase , j + 1 , _lowerCAmelCase )
or run_maze(_lowerCAmelCase , i - 1 , _lowerCAmelCase , _lowerCAmelCase )
or run_maze(_lowerCAmelCase , _lowerCAmelCase , j - 1 , _lowerCAmelCase )
):
return True
__lowerCAmelCase = 0
return False
return False
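# Illustrative worked example (maze values assumed): 0 is a free cell and 1 is a wall. A small
# self-contained reachability check mirrors what the backtracking above decides for such a grid.
def _reachable(maze):
    n, seen, stack = len(maze), set(), [(0, 0)]
    while stack:
        i, j = stack.pop()
        if (i, j) in seen or not (0 <= i < n and 0 <= j < n) or maze[i][j]:
            continue
        if (i, j) == (n - 1, n - 1):
            return True
        seen.add((i, j))
        stack.extend([(i + 1, j), (i - 1, j), (i, j + 1), (i, j - 1)])
    return False
assert _reachable([[0, 1], [0, 0]]) is True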
if __name__ == "__main__":
import doctest
doctest.testmod()
| 301
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
SCREAMING_SNAKE_CASE_ = {
'''vocab_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-openqa''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt'''
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-openqa''': (
'''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-reader''': (
'''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-openqa''': (
'''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-reader''': (
'''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'''
),
},
}
SCREAMING_SNAKE_CASE_ = {
'''google/realm-cc-news-pretrained-embedder''': 512,
'''google/realm-cc-news-pretrained-encoder''': 512,
'''google/realm-cc-news-pretrained-scorer''': 512,
'''google/realm-cc-news-pretrained-openqa''': 512,
'''google/realm-orqa-nq-openqa''': 512,
'''google/realm-orqa-nq-reader''': 512,
'''google/realm-orqa-wq-openqa''': 512,
'''google/realm-orqa-wq-reader''': 512,
}
SCREAMING_SNAKE_CASE_ = {
'''google/realm-cc-news-pretrained-embedder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-encoder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-scorer''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-reader''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-reader''': {'''do_lower_case''': True},
}
class lowerCAmelCase_ ( A__ ):
'''simple docstring'''
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_INIT_CONFIGURATION
_snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case = RealmTokenizer
def __init__( self , snake_case_=None , snake_case_=None , snake_case_=True , snake_case_="[UNK]" , snake_case_="[SEP]" , snake_case_="[PAD]" , snake_case_="[CLS]" , snake_case_="[MASK]" , snake_case_=True , snake_case_=None , **snake_case_ , ) -> Optional[int]:
super().__init__(
snake_case_ , tokenizer_file=snake_case_ , do_lower_case=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , pad_token=snake_case_ , cls_token=snake_case_ , mask_token=snake_case_ , tokenize_chinese_chars=snake_case_ , strip_accents=snake_case_ , **snake_case_ , )
__lowerCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , snake_case_ ) != do_lower_case
or normalizer_state.get("""strip_accents""" , snake_case_ ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , snake_case_ ) != tokenize_chinese_chars
):
__lowerCAmelCase = getattr(snake_case_ , normalizer_state.pop("""type""" ) )
__lowerCAmelCase = do_lower_case
__lowerCAmelCase = strip_accents
__lowerCAmelCase = tokenize_chinese_chars
__lowerCAmelCase = normalizer_class(**snake_case_ )
__lowerCAmelCase = do_lower_case
def A__ ( self , snake_case_ , **snake_case_ ) -> Tuple:
__lowerCAmelCase = PaddingStrategy.MAX_LENGTH
__lowerCAmelCase = text
__lowerCAmelCase = kwargs.pop("""text_pair""" , snake_case_ )
__lowerCAmelCase = kwargs.pop("""return_tensors""" , snake_case_ )
__lowerCAmelCase = {
"""input_ids""": [],
"""attention_mask""": [],
"""token_type_ids""": [],
}
for idx, candidate_text in enumerate(snake_case_ ):
if batch_text_pair is not None:
__lowerCAmelCase = batch_text_pair[idx]
else:
__lowerCAmelCase = None
__lowerCAmelCase = super().__call__(snake_case_ , snake_case_ , return_tensors=snake_case_ , **snake_case_ )
__lowerCAmelCase = encoded_candidates.get("""input_ids""" )
__lowerCAmelCase = encoded_candidates.get("""attention_mask""" )
__lowerCAmelCase = encoded_candidates.get("""token_type_ids""" )
if encoded_input_ids is not None:
output_data["input_ids"].append(snake_case_ )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(snake_case_ )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(snake_case_ )
__lowerCAmelCase = {key: item for key, item in output_data.items() if len(snake_case_ ) != 0}
return BatchEncoding(snake_case_ , tensor_type=snake_case_ )
def A__ ( self , snake_case_ , snake_case_=None ) -> Optional[int]:
__lowerCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def A__ ( self , snake_case_ , snake_case_ = None ) -> List[int]:
__lowerCAmelCase = [self.sep_token_id]
__lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def A__ ( self , snake_case_ , snake_case_ = None ) -> Tuple[str]:
__lowerCAmelCase = self._tokenizer.model.save(snake_case_ , name=snake_case_ )
return tuple(snake_case_ )
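# Illustrative check of the segment-id layout produced by the token-type-id method above (the
# token counts are made up): [CLS], the first segment and its [SEP] map to 0, while the second
# segment and its [SEP] map to 1.
def _segment_ids(len_a, len_b=None):
    first = [0] * (len_a + 2)            # [CLS] + tokens_a + [SEP]
    if len_b is None:
        return first
    return first + [1] * (len_b + 1)     # tokens_b + [SEP]
assert _segment_ids(3, 1) == [0, 0, 0, 0, 0, 1, 1]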
| 301
| 1
|
"""simple docstring"""
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def lowercase ():
__lowerCAmelCase = {
"""repo_name""": ["""test_repo1""", """test_repo2""", """test_repo3"""],
"""path""": ["""test_1.py""", """test_2.py""", """unit_test.py"""],
"""content""": ["""a """ * 20, """a """ * 30, """b """ * 7],
}
__lowerCAmelCase = Dataset.from_dict(_lowerCAmelCase )
return dataset
class lowerCAmelCase_ ( A__ ):
'''simple docstring'''
def A__ ( self ) -> Tuple:
__lowerCAmelCase = get_dataset()
__lowerCAmelCase = make_duplicate_clusters(snake_case_ , 0.85 )
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def A__ ( self ) -> List[str]:
__lowerCAmelCase = get_dataset()
__lowerCAmelCase , __lowerCAmelCase = deduplicate_dataset(snake_case_ )
self.assertEqual(len(snake_case_ ) , 2 )
print(snake_case_ )
self.assertEqual(duplicate_clusters[0][0]["""copies"""] , 2 )
self.assertEqual(duplicate_clusters[0][0]["""is_extreme"""] , snake_case_ )
| 301
|
"""simple docstring"""
import math
def lowercase (_lowerCAmelCase ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
return False
# All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(_lowerCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
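# Self-contained reference check of the 6k +/- 1 observation used above (illustrative only):
# every prime greater than 3 leaves remainder 1 or 5 when divided by 6.
assert all(p % 6 in (1, 5) for p in [5, 7, 11, 13, 17, 19, 23, 29, 31, 37])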
def lowercase (_lowerCAmelCase = 0.1 ):
__lowerCAmelCase = 3
__lowerCAmelCase = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
primes += is_prime(_lowerCAmelCase )
j += 2
return j
if __name__ == "__main__":
import doctest
doctest.testmod()
| 301
| 1
|
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_=13 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=99 , snake_case_=32 , snake_case_=2 , snake_case_=4 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=512 , snake_case_=16 , snake_case_=2 , snake_case_=0.02 , snake_case_=3 , snake_case_=4 , snake_case_=None , ) -> Dict:
__lowerCAmelCase = parent
__lowerCAmelCase = 13
__lowerCAmelCase = 7
__lowerCAmelCase = True
__lowerCAmelCase = True
__lowerCAmelCase = True
__lowerCAmelCase = True
__lowerCAmelCase = 99
__lowerCAmelCase = 384
__lowerCAmelCase = 2
__lowerCAmelCase = 4
__lowerCAmelCase = 37
__lowerCAmelCase = """gelu"""
__lowerCAmelCase = 0.1
__lowerCAmelCase = 0.1
__lowerCAmelCase = 512
__lowerCAmelCase = 16
__lowerCAmelCase = 2
__lowerCAmelCase = 0.02
__lowerCAmelCase = 3
__lowerCAmelCase = 4
__lowerCAmelCase = 128
__lowerCAmelCase = 2
__lowerCAmelCase = 9
__lowerCAmelCase = 1
__lowerCAmelCase = None
def A__ ( self ) -> str:
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase = None
if self.use_input_mask:
__lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase = None
if self.use_token_type_ids:
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCAmelCase = None
__lowerCAmelCase = None
__lowerCAmelCase = None
if self.use_labels:
__lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
__lowerCAmelCase = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=snake_case_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> str:
__lowerCAmelCase = TFConvBertModel(config=snake_case_ )
__lowerCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
__lowerCAmelCase = [input_ids, input_mask]
__lowerCAmelCase = model(snake_case_ )
__lowerCAmelCase = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Union[str, Any]:
__lowerCAmelCase = TFConvBertForMaskedLM(config=snake_case_ )
__lowerCAmelCase = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__lowerCAmelCase = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> int:
__lowerCAmelCase = self.num_labels
__lowerCAmelCase = TFConvBertForSequenceClassification(config=snake_case_ )
__lowerCAmelCase = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__lowerCAmelCase = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Any:
__lowerCAmelCase = self.num_choices
__lowerCAmelCase = TFConvBertForMultipleChoice(config=snake_case_ )
__lowerCAmelCase = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) )
__lowerCAmelCase = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) )
__lowerCAmelCase = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) )
__lowerCAmelCase = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
__lowerCAmelCase = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> int:
__lowerCAmelCase = self.num_labels
__lowerCAmelCase = TFConvBertForTokenClassification(config=snake_case_ )
__lowerCAmelCase = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__lowerCAmelCase = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Optional[Any]:
__lowerCAmelCase = TFConvBertForQuestionAnswering(config=snake_case_ )
__lowerCAmelCase = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__lowerCAmelCase = model(snake_case_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A__ ( self ) -> List[str]:
__lowerCAmelCase = self.prepare_config_and_inputs()
(
(
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) ,
) = config_and_inputs
__lowerCAmelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( A__ , A__ , unittest.TestCase ):
'''simple docstring'''
_snake_case = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
_snake_case = (
{
'''feature-extraction''': TFConvBertModel,
'''fill-mask''': TFConvBertForMaskedLM,
'''question-answering''': TFConvBertForQuestionAnswering,
'''text-classification''': TFConvBertForSequenceClassification,
'''token-classification''': TFConvBertForTokenClassification,
'''zero-shot''': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_snake_case = False
_snake_case = False
_snake_case = False
def A__ ( self ) -> int:
__lowerCAmelCase = TFConvBertModelTester(self )
__lowerCAmelCase = ConfigTester(self , config_class=snake_case_ , hidden_size=37 )
def A__ ( self ) -> List[str]:
self.config_tester.run_common_tests()
def A__ ( self ) -> Optional[int]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def A__ ( self ) -> Optional[int]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case_ )
def A__ ( self ) -> Optional[Any]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case_ )
def A__ ( self ) -> List[Any]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case_ )
def A__ ( self ) -> Optional[Any]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case_ )
def A__ ( self ) -> Optional[Any]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case_ )
@slow
def A__ ( self ) -> Union[str, Any]:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = True
__lowerCAmelCase = True
if hasattr(snake_case_ , """use_cache""" ):
__lowerCAmelCase = True
__lowerCAmelCase = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
__lowerCAmelCase = getattr(self.model_tester , """key_length""" , snake_case_ )
for model_class in self.all_model_classes:
__lowerCAmelCase = self._prepare_for_class(snake_case_ , snake_case_ )
__lowerCAmelCase = model_class(snake_case_ )
__lowerCAmelCase = len(model(snake_case_ ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case_ , saved_model=snake_case_ )
__lowerCAmelCase = os.path.join(snake_case_ , """saved_model""" , """1""" )
__lowerCAmelCase = tf.keras.models.load_model(snake_case_ )
__lowerCAmelCase = model(snake_case_ )
if self.is_encoder_decoder:
__lowerCAmelCase = outputs["""encoder_hidden_states"""]
__lowerCAmelCase = outputs["""encoder_attentions"""]
else:
__lowerCAmelCase = outputs["""hidden_states"""]
__lowerCAmelCase = outputs["""attentions"""]
self.assertEqual(len(snake_case_ ) , snake_case_ )
__lowerCAmelCase = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(snake_case_ ) , snake_case_ )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def A__ ( self ) -> Tuple:
__lowerCAmelCase = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
self.assertIsNotNone(snake_case_ )
def A__ ( self ) -> str:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = True
__lowerCAmelCase = getattr(self.model_tester , """decoder_seq_length""" , self.model_tester.seq_length )
__lowerCAmelCase = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
__lowerCAmelCase = getattr(self.model_tester , """key_length""" , snake_case_ )
__lowerCAmelCase = getattr(self.model_tester , """key_length""" , snake_case_ )
def check_decoder_attentions_output(snake_case_ ):
__lowerCAmelCase = len(snake_case_ )
self.assertEqual(out_len % 2 , 0 )
__lowerCAmelCase = outputs.decoder_attentions
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(snake_case_ ):
__lowerCAmelCase = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
__lowerCAmelCase = True
__lowerCAmelCase = False
__lowerCAmelCase = model_class(snake_case_ )
__lowerCAmelCase = model(self._prepare_for_class(snake_case_ , snake_case_ ) )
__lowerCAmelCase = len(snake_case_ )
self.assertEqual(config.output_hidden_states , snake_case_ )
check_encoder_attentions_output(snake_case_ )
if self.is_encoder_decoder:
__lowerCAmelCase = model_class(snake_case_ )
__lowerCAmelCase = model(self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(config.output_hidden_states , snake_case_ )
check_decoder_attentions_output(snake_case_ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__lowerCAmelCase = True
__lowerCAmelCase = model_class(snake_case_ )
__lowerCAmelCase = model(self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(config.output_hidden_states , snake_case_ )
check_encoder_attentions_output(snake_case_ )
# Check attention is always last and order is fine
__lowerCAmelCase = True
__lowerCAmelCase = True
__lowerCAmelCase = model_class(snake_case_ )
__lowerCAmelCase = model(self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(snake_case_ ) )
self.assertEqual(model.config.output_hidden_states , snake_case_ )
check_encoder_attentions_output(snake_case_ )
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def A__ ( self ) -> Any:
__lowerCAmelCase = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
__lowerCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
__lowerCAmelCase = model(snake_case_ )[0]
__lowerCAmelCase = [1, 6, 768]
self.assertEqual(output.shape , snake_case_ )
__lowerCAmelCase = tf.constant(
[
[
[-0.03_475_493, -0.4_686_034, -0.30_638_832],
[0.22_637_248, -0.26_988_646, -0.7_423_424],
[0.10_324_868, -0.45_013_508, -0.58_280_784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , snake_case_ , atol=1e-4 )
| 301
|
"""simple docstring"""
import os
from distutils.util import strtobool
def lowercase (_lowerCAmelCase , _lowerCAmelCase ):
for e in env_keys:
__lowerCAmelCase = int(os.environ.get(_lowerCAmelCase , -1 ) )
if val >= 0:
return val
return default
def lowercase (_lowerCAmelCase , _lowerCAmelCase=False ):
__lowerCAmelCase = os.environ.get(_lowerCAmelCase , str(_lowerCAmelCase ) )
return strtobool(_lowerCAmelCase ) == 1 # As its name indicates `strtobool` actually returns an int...
def lowercase (_lowerCAmelCase , _lowerCAmelCase="no" ):
__lowerCAmelCase = os.environ.get(_lowerCAmelCase , str(_lowerCAmelCase ) )
return value
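# Illustrative usage sketch (the environment-variable names are made up): the helpers above scan a
# list of keys and fall back to a default; this self-contained mini-version shows the same idea.
def _env_example():
    import os
    from distutils.util import strtobool
    os.environ.setdefault("MY_TOOL_NUM_WORKERS", "4")   # hypothetical key
    os.environ.setdefault("MY_TOOL_VERBOSE", "true")    # hypothetical key
    workers = int(os.environ.get("MY_TOOL_NUM_WORKERS", -1))
    verbose = strtobool(os.environ.get("MY_TOOL_VERBOSE", "no")) == 1
    return workers, verbose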
| 301
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'''tiiuae/falcon-40b''': '''https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json''',
'''tiiuae/falcon-7b''': '''https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json''',
}
class lowerCAmelCase_ ( A__ ):
'''simple docstring'''
_snake_case = '''falcon'''
_snake_case = ['''past_key_values''']
def __init__( self , snake_case_=65_024 , snake_case_=4_544 , snake_case_=32 , snake_case_=71 , snake_case_=1e-5 , snake_case_=0.02 , snake_case_=True , snake_case_=0.0 , snake_case_=0.0 , snake_case_=None , snake_case_=False , snake_case_=False , snake_case_=True , snake_case_=True , snake_case_=False , snake_case_=11 , snake_case_=11 , **snake_case_ , ) -> Tuple:
__lowerCAmelCase = vocab_size
# Backward compatibility with n_embed kwarg
__lowerCAmelCase = kwargs.pop("""n_embed""" , snake_case_ )
__lowerCAmelCase = hidden_size if n_embed is None else n_embed
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = layer_norm_epsilon
__lowerCAmelCase = initializer_range
__lowerCAmelCase = use_cache
__lowerCAmelCase = hidden_dropout
__lowerCAmelCase = attention_dropout
__lowerCAmelCase = bos_token_id
__lowerCAmelCase = eos_token_id
__lowerCAmelCase = num_attention_heads if num_kv_heads is None else num_kv_heads
__lowerCAmelCase = alibi
__lowerCAmelCase = new_decoder_architecture
__lowerCAmelCase = multi_query # Ignored when new_decoder_architecture is True
__lowerCAmelCase = parallel_attn
__lowerCAmelCase = bias
super().__init__(bos_token_id=snake_case_ , eos_token_id=snake_case_ , **snake_case_ )
@property
def A__ ( self ) -> int:
return self.hidden_size // self.num_attention_heads
@property
def A__ ( self ) -> Any:
return not self.alibi
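# Illustrative sketch (the checkpoint id is the real tiiuae/falcon-7b listed above; loading it
# needs network access, so the call is wrapped in a function rather than executed at import time).
def _falcon_head_dim_example():
    config = lowerCAmelCase_.from_pretrained("tiiuae/falcon-7b")
    # 4544 hidden units split across 71 heads gives a per-head dimension of 64
    return config.hidden_size // config.num_attention_heads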
| 301
|
"""simple docstring"""
def lowercase (_lowerCAmelCase = 100_0000 ):
__lowerCAmelCase = [i - 1 for i in range(limit + 1 )]
for i in range(2 , limit + 1 ):
if phi[i] == i - 1:
for j in range(2 * i , limit + 1 , _lowerCAmelCase ):
phi[j] -= phi[j] // i
return sum(phi[2 : limit + 1] )
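# Self-contained sanity check with a tiny limit (illustrative): the sum of Euler's totient over
# 2..8, i.e. the count of reduced proper fractions n/d with d <= 8, is 1+2+2+4+2+6+4 = 21.
from math import gcd as _gcd
assert sum(1 for d in range(2, 9) for n in range(1, d) if _gcd(n, d) == 1) == 21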
if __name__ == "__main__":
print(solution())
| 301
| 1
|
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self ) -> Union[str, Any]:
__lowerCAmelCase = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
__lowerCAmelCase = get_activation("""gelu""" )
self.assertTrue(torch.allclose(gelu_python(snake_case_ ) , torch_builtin(snake_case_ ) ) )
self.assertFalse(torch.allclose(gelu_python(snake_case_ ) , gelu_new(snake_case_ ) ) )
def A__ ( self ) -> Any:
__lowerCAmelCase = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
__lowerCAmelCase = get_activation("""gelu""" )
__lowerCAmelCase = get_activation("""gelu_10""" )
__lowerCAmelCase = torch_builtin(snake_case_ )
__lowerCAmelCase = geluaa(snake_case_ )
__lowerCAmelCase = torch.where(y_gelu_aa < 10.0 , 1 , 0 )
self.assertTrue(torch.max(snake_case_ ).item() == 10.0 )
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
def A__ ( self ) -> str:
get_activation("""gelu""" )
get_activation("""gelu_10""" )
get_activation("""gelu_fast""" )
get_activation("""gelu_new""" )
get_activation("""gelu_python""" )
get_activation("""gelu_pytorch_tanh""" )
get_activation("""linear""" )
get_activation("""mish""" )
get_activation("""quick_gelu""" )
get_activation("""relu""" )
get_activation("""sigmoid""" )
get_activation("""silu""" )
get_activation("""swish""" )
get_activation("""tanh""" )
with self.assertRaises(snake_case_ ):
get_activation("""bogus""" )
with self.assertRaises(snake_case_ ):
get_activation(snake_case_ )
def A__ ( self ) -> Dict:
__lowerCAmelCase = get_activation("""gelu""" )
__lowerCAmelCase = 1
__lowerCAmelCase = get_activation("""gelu""" )
self.assertEqual(acta.a , 1 )
with self.assertRaises(snake_case_ ):
__lowerCAmelCase = acta.a
| 301
|
"""simple docstring"""
from math import isqrt, loga
def lowercase (_lowerCAmelCase ):
__lowerCAmelCase = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , _lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = False
return [i for i in range(2 , _lowerCAmelCase ) if is_prime[i]]
def lowercase (_lowerCAmelCase = 80_0800 , _lowerCAmelCase = 80_0800 ):
__lowerCAmelCase = degree * loga(_lowerCAmelCase )
__lowerCAmelCase = int(_lowerCAmelCase )
__lowerCAmelCase = calculate_prime_numbers(_lowerCAmelCase )
__lowerCAmelCase = 0
__lowerCAmelCase = 0
__lowerCAmelCase = len(_lowerCAmelCase ) - 1
while left < right:
while (
prime_numbers[right] * loga(prime_numbers[left] )
+ prime_numbers[left] * loga(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
if __name__ == "__main__":
print(F"{solution() = }")
| 301
| 1
|
"""simple docstring"""
def lowercase (_lowerCAmelCase = 1000 ):
__lowerCAmelCase = -1
__lowerCAmelCase = 0
for a in range(1 , n // 3 ):
# Solve the two equations a**2 + b**2 = c**2 and a + b + c = N by eliminating c
__lowerCAmelCase = (n * n - 2 * a * n) // (2 * n - 2 * a)
__lowerCAmelCase = n - a - b
if c * c == (a * a + b * b):
__lowerCAmelCase = a * b * c
if candidate >= product:
__lowerCAmelCase = candidate
return product
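# Self-contained worked example (illustrative): for a perimeter of 12 the only Pythagorean
# triplet is (3, 4, 5), so the maximised product is 60.
assert next(
    a * b * (12 - a - b)
    for a in range(1, 12)
    for b in range(a, 12 - a)
    if a * a + b * b == (12 - a - b) ** 2
) == 60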
if __name__ == "__main__":
print(F"{solution() = }")
| 301
|
"""simple docstring"""
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
SCREAMING_SNAKE_CASE_ = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_=16 , snake_case_=13 , snake_case_=7 , snake_case_=14 , snake_case_=10 , snake_case_=19 , snake_case_=5 , snake_case_=4 , snake_case_=True , snake_case_=16 , snake_case_=2 , snake_case_=4 , snake_case_=4 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=[1, 2, 3, 4, 5] , snake_case_=25 , snake_case_=5 , ) -> Tuple:
__lowerCAmelCase = d_model
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = prediction_length
__lowerCAmelCase = context_length
__lowerCAmelCase = cardinality
__lowerCAmelCase = num_time_features
__lowerCAmelCase = lags_sequence
__lowerCAmelCase = embedding_dimension
__lowerCAmelCase = is_training
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = context_length
__lowerCAmelCase = prediction_length + label_length
__lowerCAmelCase = label_length
__lowerCAmelCase = moving_average
__lowerCAmelCase = autocorrelation_factor
def A__ ( self ) -> List[Any]:
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def A__ ( self , snake_case_ ) -> Any:
__lowerCAmelCase = config.context_length + max(config.lags_sequence )
__lowerCAmelCase = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
__lowerCAmelCase = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
__lowerCAmelCase = floats_tensor([self.batch_size, _past_length] )
__lowerCAmelCase = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
__lowerCAmelCase = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
__lowerCAmelCase = floats_tensor([self.batch_size, config.prediction_length] )
__lowerCAmelCase = {
"""past_values""": past_values,
"""static_categorical_features""": static_categorical_features,
"""past_time_features""": past_time_features,
"""past_observed_mask""": past_observed_mask,
"""future_time_features""": future_time_features,
"""future_values""": future_values,
}
return inputs_dict
def A__ ( self ) -> Union[str, Any]:
__lowerCAmelCase = self.get_config()
__lowerCAmelCase = self.prepare_autoformer_inputs_dict(snake_case_ )
return config, inputs_dict
def A__ ( self ) -> int:
__lowerCAmelCase , __lowerCAmelCase = self.prepare_config_and_inputs()
return config, inputs_dict
def A__ ( self , snake_case_ , snake_case_ ) -> int:
__lowerCAmelCase = AutoformerModel(config=snake_case_ ).to(snake_case_ ).eval()
__lowerCAmelCase = model(**snake_case_ )
__lowerCAmelCase = outputs.encoder_last_hidden_state
__lowerCAmelCase = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCAmelCase = model.get_encoder()
encoder.save_pretrained(snake_case_ )
__lowerCAmelCase = AutoformerEncoder.from_pretrained(snake_case_ ).to(snake_case_ )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = model.create_network_inputs(**snake_case_ )
__lowerCAmelCase , __lowerCAmelCase = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
__lowerCAmelCase = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
__lowerCAmelCase = encoder(inputs_embeds=snake_case_ )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
__lowerCAmelCase = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
__lowerCAmelCase = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
__lowerCAmelCase = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
__lowerCAmelCase = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCAmelCase = model.get_decoder()
decoder.save_pretrained(snake_case_ )
__lowerCAmelCase = AutoformerDecoder.from_pretrained(snake_case_ ).to(snake_case_ )
__lowerCAmelCase = decoder(
trend=snake_case_ , inputs_embeds=snake_case_ , encoder_hidden_states=snake_case_ , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class lowerCAmelCase_ ( A__ , A__ , unittest.TestCase ):
'''simple docstring'''
_snake_case = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
_snake_case = (AutoformerForPrediction,) if is_torch_available() else ()
_snake_case = {'''feature-extraction''': AutoformerModel} if is_torch_available() else {}
_snake_case = False
_snake_case = False
_snake_case = False
_snake_case = False
_snake_case = False
_snake_case = False
def A__ ( self ) -> Optional[int]:
__lowerCAmelCase = AutoformerModelTester(self )
__lowerCAmelCase = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ )
def A__ ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
def A__ ( self ) -> Dict:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(snake_case_ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case_ )
__lowerCAmelCase , __lowerCAmelCase = model_class.from_pretrained(snake_case_ , output_loading_info=snake_case_ )
self.assertEqual(info["""missing_keys"""] , [] )
def A__ ( self ) -> Optional[Any]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*snake_case_ )
@unittest.skip(reason="""Model has no tokens embeddings""" )
def A__ ( self ) -> Any:
pass
def A__ ( self ) -> str:
__lowerCAmelCase = inspect.signature(getattr(snake_case_ , """forward""" ) )
# The main input is the name of the argument after `self`
__lowerCAmelCase = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , snake_case_ )
def A__ ( self ) -> Any:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(snake_case_ )
__lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase = [*signature.parameters.keys()]
__lowerCAmelCase = [
"""past_values""",
"""past_time_features""",
"""past_observed_mask""",
"""static_categorical_features""",
"""static_real_features""",
"""future_values""",
"""future_time_features""",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("""future_observed_mask""" )
expected_arg_names.extend(
[
"""decoder_attention_mask""",
"""head_mask""",
"""decoder_head_mask""",
"""cross_attn_head_mask""",
"""encoder_outputs""",
"""past_key_values""",
"""output_hidden_states""",
"""output_attentions""",
"""use_cache""",
"""return_dict""",
] )
self.assertListEqual(arg_names[: len(snake_case_ )] , snake_case_ )
def A__ ( self ) -> Optional[int]:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = True
__lowerCAmelCase = getattr(self.model_tester , """seq_length""" , snake_case_ )
__lowerCAmelCase = getattr(self.model_tester , """decoder_seq_length""" , snake_case_ )
__lowerCAmelCase = getattr(self.model_tester , """encoder_seq_length""" , snake_case_ )
__lowerCAmelCase = getattr(self.model_tester , """d_model""" , snake_case_ )
__lowerCAmelCase = getattr(self.model_tester , """num_attention_heads""" , snake_case_ )
__lowerCAmelCase = d_model // num_attention_heads
for model_class in self.all_model_classes:
__lowerCAmelCase = True
__lowerCAmelCase = False
__lowerCAmelCase = True
__lowerCAmelCase = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
__lowerCAmelCase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__lowerCAmelCase = True
__lowerCAmelCase = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
__lowerCAmelCase = outputs.encoder_attentions
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
__lowerCAmelCase = len(snake_case_ )
__lowerCAmelCase = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(snake_case_ , snake_case_ )
# decoder attentions
__lowerCAmelCase = outputs.decoder_attentions
self.assertIsInstance(snake_case_ , (list, tuple) )
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
__lowerCAmelCase = outputs.cross_attentions
self.assertIsInstance(snake_case_ , (list, tuple) )
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
__lowerCAmelCase = True
__lowerCAmelCase = True
__lowerCAmelCase = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(out_len + 2 , len(snake_case_ ) )
__lowerCAmelCase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def A__ ( self ) -> int:
super().test_retain_grad_hidden_states_attentions()
def lowercase (_lowerCAmelCase="train-batch.pt" ):
__lowerCAmelCase = hf_hub_download(repo_id="""hf-internal-testing/tourism-monthly-batch""" , filename=_lowerCAmelCase , repo_type="""dataset""" )
__lowerCAmelCase = torch.load(_lowerCAmelCase , map_location=_lowerCAmelCase )
return batch
@require_torch
@slow
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self ) -> int:
__lowerCAmelCase = AutoformerModel.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(snake_case_ )
__lowerCAmelCase = prepare_batch()
with torch.no_grad():
__lowerCAmelCase = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , future_values=batch["""future_values"""] , future_time_features=batch["""future_time_features"""] , )[0]
__lowerCAmelCase = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , snake_case_ )
__lowerCAmelCase = torch.tensor(
[[0.3_593, -1.3_398, 0.6_330], [0.2_279, 1.5_396, -0.1_792], [0.0_450, 1.3_225, -0.2_335]] , device=snake_case_ )
self.assertTrue(torch.allclose(output[0, :3, :3] , snake_case_ , atol=snake_case_ ) )
def A__ ( self ) -> List[str]:
__lowerCAmelCase = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(snake_case_ )
__lowerCAmelCase = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
__lowerCAmelCase = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , ).encoder_last_hidden_state
__lowerCAmelCase = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , snake_case_ )
__lowerCAmelCase = torch.tensor(
[[-0.0_734, -0.9_036, 0.8_358], [4.7_186, 2.4_113, 1.9_581], [1.7_953, 2.3_558, 1.2_970]] , device=snake_case_ )
self.assertTrue(torch.allclose(output[0, :3, :3] , snake_case_ , atol=snake_case_ ) )
def A__ ( self ) -> Any:
__lowerCAmelCase = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(snake_case_ )
__lowerCAmelCase = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
__lowerCAmelCase = model.generate(
static_categorical_features=batch["""static_categorical_features"""] , past_time_features=batch["""past_time_features"""] , past_values=batch["""past_values"""] , future_time_features=batch["""future_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , )
__lowerCAmelCase = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , snake_case_ )
__lowerCAmelCase = torch.tensor([3_130.6_763, 4_056.5_293, 7_053.0_786] , device=snake_case_ )
__lowerCAmelCase = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , snake_case_ , rtol=1e-1 ) )
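# Illustrative sketch (not part of the test file): the encoder/decoder checks above feed the
# model's decomposition_layer, which splits a series into a smooth trend and a seasonal
# remainder. The NumPy-only helper below is a simplified version of that idea (moving-average
# trend, residual as "seasonal"); the kernel size, padding mode and names are assumptions for
# demonstration and do not mirror the library implementation exactly.
import numpy as np


def _decompose(series: np.ndarray, kernel: int = 25) -> tuple[np.ndarray, np.ndarray]:
    # pad with edge values so the moving average keeps the original length
    pad = kernel // 2
    padded = np.pad(series, (pad, kernel - 1 - pad), mode="edge")
    trend = np.convolve(padded, np.ones(kernel) / kernel, mode="valid")
    seasonal = series - trend
    return seasonal, trend


_x = np.sin(np.linspace(0, 12 * np.pi, 200)) + np.linspace(0, 3, 200)
_seasonal, _trend = _decompose(_x)
assert _seasonal.shape == _trend.shape == _x.shape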
| 301
| 1
|
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = '''%20'''.join(argv[1:]) if len(argv) > 1 else quote(str(input('''Search: ''')))
print('''Googling.....''')
SCREAMING_SNAKE_CASE_ = F"https://www.google.com/search?q={query}&num=100"
SCREAMING_SNAKE_CASE_ = requests.get(
url,
headers={'''User-Agent''': str(UserAgent().random)},
)
try:
SCREAMING_SNAKE_CASE_ = (
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''yuRUbf'''})
.find('''a''')
.get('''href''')
)
except AttributeError:
SCREAMING_SNAKE_CASE_ = parse_qs(
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''kCrYT'''})
.find('''a''')
.get('''href''')
)['''url'''][0]
webbrowser.open(link)
| 301
|
"""simple docstring"""
from math import pi, sqrt
def lowercase (_lowerCAmelCase ):
if num <= 0:
raise ValueError("""math domain error""" )
if num > 171.5:
raise OverflowError("""math range error""" )
elif num - int(_lowerCAmelCase ) not in (0, 0.5):
raise NotImplementedError("""num must be an integer or a half-integer""" )
elif num == 0.5:
return sqrt(_lowerCAmelCase )
else:
return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
def lowercase ():
assert gamma(0.5 ) == sqrt(_lowerCAmelCase )
assert gamma(1 ) == 1.0
assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
SCREAMING_SNAKE_CASE_ = 1.0
while num:
SCREAMING_SNAKE_CASE_ = float(input('''Gamma of: '''))
print(F"gamma({num}) = {gamma(num)}")
print('''\nEnter 0 to exit...''')
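# Illustrative sketch (not part of the original file): the recursion above implements
# Gamma(n) = (n - 1) * Gamma(n - 1) with base cases Gamma(1) = 1 and Gamma(0.5) = sqrt(pi).
# The standalone version below uses ordinary names and cross-checks a few values against
# math.gamma; it is shown for demonstration only.
import math


def _gamma_half_integer(x: float) -> float:
    if x == 0.5:
        return math.sqrt(math.pi)
    if x == 1:
        return 1.0
    return (x - 1) * _gamma_half_integer(x - 1)


for _v in (0.5, 1, 1.5, 2, 3.5, 5):
    assert math.isclose(_gamma_half_integer(_v), math.gamma(_v))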
| 301
| 1
|
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
SCREAMING_SNAKE_CASE_ = '''bart'''
SCREAMING_SNAKE_CASE_ = True
@st.cache(allow_output_mutation=_lowerCAmelCase )
def lowercase ():
if LOAD_DENSE_INDEX:
__lowerCAmelCase = AutoTokenizer.from_pretrained("""yjernite/retribert-base-uncased""" )
__lowerCAmelCase = AutoModel.from_pretrained("""yjernite/retribert-base-uncased""" ).to("""cuda:0""" )
__lowerCAmelCase = qar_model.eval()
else:
__lowerCAmelCase , __lowerCAmelCase = (None, None)
if MODEL_TYPE == "bart":
__lowerCAmelCase = AutoTokenizer.from_pretrained("""yjernite/bart_eli5""" )
__lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained("""yjernite/bart_eli5""" ).to("""cuda:0""" )
__lowerCAmelCase = torch.load("""seq2seq_models/eli5_bart_model_blm_2.pth""" )
sas_model.load_state_dict(save_dict["""model"""] )
__lowerCAmelCase = sas_model.eval()
else:
__lowerCAmelCase , __lowerCAmelCase = make_qa_sas_model(
model_name="""t5-small""" , from_file="""seq2seq_models/eli5_t5_model_1024_4.pth""" , device="""cuda:0""" )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=_lowerCAmelCase )
def lowercase ():
if LOAD_DENSE_INDEX:
__lowerCAmelCase = faiss.StandardGpuResources()
__lowerCAmelCase = datasets.load_dataset(path="""wiki_snippets""" , name="""wiki40b_en_100_0""" )["""train"""]
__lowerCAmelCase = np.memmap(
"""wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat""" , dtype="""float32""" , mode="""r""" , shape=(wikiaab_passages.num_rows, 128) , )
__lowerCAmelCase = faiss.IndexFlatIP(128 )
__lowerCAmelCase = faiss.index_cpu_to_gpu(_lowerCAmelCase , 1 , _lowerCAmelCase )
wikiaab_gpu_index_flat.add(_lowerCAmelCase ) # TODO fix for larger GPU
else:
__lowerCAmelCase , __lowerCAmelCase = (None, None)
__lowerCAmelCase = Elasticsearch([{"""host""": """localhost""", """port""": """9200"""}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=_lowerCAmelCase )
def lowercase ():
__lowerCAmelCase = datasets.load_dataset("""eli5""" , name="""LFQA_reddit""" )
__lowerCAmelCase = elia["""train_eli5"""]
__lowerCAmelCase = np.memmap(
"""eli5_questions_reps.dat""" , dtype="""float32""" , mode="""r""" , shape=(elia_train.num_rows, 128) )
__lowerCAmelCase = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(_lowerCAmelCase )
return (elia_train, eli5_train_q_index)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = load_indexes()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = load_models()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = load_train_data()
def lowercase (_lowerCAmelCase , _lowerCAmelCase=10 ):
__lowerCAmelCase = embed_questions_for_retrieval([question] , _lowerCAmelCase , _lowerCAmelCase )
__lowerCAmelCase , __lowerCAmelCase = eli5_train_q_index.search(_lowerCAmelCase , _lowerCAmelCase )
__lowerCAmelCase = [elia_train[int(_lowerCAmelCase )] for i in I[0]]
return nn_examples
def lowercase (_lowerCAmelCase , _lowerCAmelCase="wiki40b" , _lowerCAmelCase="dense" , _lowerCAmelCase=10 ):
if source == "none":
__lowerCAmelCase , __lowerCAmelCase = (""" <P> """.join(["""""" for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
__lowerCAmelCase , __lowerCAmelCase = query_qa_dense_index(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
else:
__lowerCAmelCase , __lowerCAmelCase = query_es_index(
_lowerCAmelCase , _lowerCAmelCase , index_name="""english_wiki40b_snippets_100w""" , n_results=_lowerCAmelCase , )
__lowerCAmelCase = [
(res["""article_title"""], res["""section_title"""].strip(), res["""score"""], res["""passage_text"""]) for res in hit_lst
]
__lowerCAmelCase = """question: {} context: {}""".format(_lowerCAmelCase , _lowerCAmelCase )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda _lowerCAmelCase : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _lowerCAmelCase : None),
} )
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=64 , _lowerCAmelCase=256 , _lowerCAmelCase=False , _lowerCAmelCase=2 , _lowerCAmelCase=0.95 , _lowerCAmelCase=0.8 ):
with torch.no_grad():
__lowerCAmelCase = qa_sas_generate(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , num_answers=1 , num_beams=_lowerCAmelCase , min_len=_lowerCAmelCase , max_len=_lowerCAmelCase , do_sample=_lowerCAmelCase , temp=_lowerCAmelCase , top_p=_lowerCAmelCase , top_k=_lowerCAmelCase , max_input_length=1024 , device="""cuda:0""" , )[0]
return (answer, support_list)
st.title('''Long Form Question Answering with ELI5''')
# Start sidebar
SCREAMING_SNAKE_CASE_ = '''<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'''
SCREAMING_SNAKE_CASE_ = '''
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class="img-container"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
''' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
SCREAMING_SNAKE_CASE_ = '''
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
'''
st.sidebar.markdown(description, unsafe_allow_html=True)
SCREAMING_SNAKE_CASE_ = [
'''Answer the question''',
'''View the retrieved document only''',
'''View the most similar ELI5 question and answer''',
'''Show me everything, please!''',
]
SCREAMING_SNAKE_CASE_ = st.sidebar.checkbox('''Demo options''')
if demo_options:
SCREAMING_SNAKE_CASE_ = st.sidebar.selectbox(
'''''',
action_list,
index=3,
)
SCREAMING_SNAKE_CASE_ = action_list.index(action_st)
SCREAMING_SNAKE_CASE_ = st.sidebar.selectbox(
'''''',
['''Show full text of passages''', '''Show passage section titles'''],
index=0,
)
SCREAMING_SNAKE_CASE_ = show_type == '''Show full text of passages'''
else:
SCREAMING_SNAKE_CASE_ = 3
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = st.sidebar.checkbox('''Retrieval options''')
if retrieval_options:
SCREAMING_SNAKE_CASE_ = '''
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) question-answer pairs.
The answer is then generated by a sequence-to-sequence model which takes the question and the retrieved documents as input.
'''
st.sidebar.markdown(retriever_info)
SCREAMING_SNAKE_CASE_ = st.sidebar.selectbox('''Which Wikipedia format should the model use?''', ['''wiki40b''', '''none'''])
SCREAMING_SNAKE_CASE_ = st.sidebar.selectbox('''Which Wikipedia indexer should the model use?''', ['''dense''', '''sparse''', '''mixed'''])
else:
SCREAMING_SNAKE_CASE_ = '''wiki40b'''
SCREAMING_SNAKE_CASE_ = '''dense'''
SCREAMING_SNAKE_CASE_ = '''beam'''
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = 64
SCREAMING_SNAKE_CASE_ = 256
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = st.sidebar.checkbox('''Generation options''')
if generate_options:
SCREAMING_SNAKE_CASE_ = '''
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder\'s output probabilities.
'''
st.sidebar.markdown(generate_info)
SCREAMING_SNAKE_CASE_ = st.sidebar.selectbox('''Would you like to use beam search or sample an answer?''', ['''beam''', '''sampled'''])
SCREAMING_SNAKE_CASE_ = st.sidebar.slider(
'''Minimum generation length''', min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
SCREAMING_SNAKE_CASE_ = st.sidebar.slider(
'''Maximum generation length''', min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
SCREAMING_SNAKE_CASE_ = st.sidebar.slider('''Beam size''', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
SCREAMING_SNAKE_CASE_ = st.sidebar.slider(
'''Nucleus sampling p''', min_value=0.1, max_value=1.0, value=0.9_5, step=0.0_1, format=None, key=None
)
SCREAMING_SNAKE_CASE_ = st.sidebar.slider(
'''Temperature''', min_value=0.1, max_value=1.0, value=0.7, step=0.0_1, format=None, key=None
)
SCREAMING_SNAKE_CASE_ = None
# start main text
SCREAMING_SNAKE_CASE_ = [
'''<MY QUESTION>''',
'''How do people make chocolate?''',
'''Why do we get a fever when we are sick?''',
'''How can different animals perceive different colors?''',
'''What is natural language processing?''',
'''What\'s the best way to treat a sunburn?''',
'''What exactly are vitamins ?''',
'''How does nuclear energy provide electricity?''',
'''What\'s the difference between viruses and bacteria?''',
'''Why are flutes classified as woodwinds when most of them are made out of metal ?''',
'''Why do people like drinking coffee even though it tastes so bad?''',
'''What happens when wine ages? How does it make the wine taste better?''',
'''If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?''',
'''How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?''',
'''How does New Zealand have so many large bird predators?''',
]
SCREAMING_SNAKE_CASE_ = st.selectbox(
'''What would you like to ask? ---- select <MY QUESTION> to enter a new query''',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
SCREAMING_SNAKE_CASE_ = st.text_input('''Enter your question here:''', '''''')
else:
SCREAMING_SNAKE_CASE_ = question_s
if st.button('''Show me!'''):
if action in [0, 1, 3]:
if index_type == "mixed":
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = make_support(question, source=wiki_source, method='''dense''', n_results=10)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = make_support(question, source=wiki_source, method='''sparse''', n_results=10)
SCREAMING_SNAKE_CASE_ = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
SCREAMING_SNAKE_CASE_ = support_list[:10]
SCREAMING_SNAKE_CASE_ = '''<P> ''' + ''' <P> '''.join([res[-1] for res in support_list])
else:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == '''sampled'''),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('''### The model generated answer is:''')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('''--- \n ### The model is drawing information from the following Wikipedia passages:''')
for i, res in enumerate(support_list):
SCREAMING_SNAKE_CASE_ = '''https://en.wikipedia.org/wiki/{}'''.format(res[0].replace(''' ''', '''_'''))
SCREAMING_SNAKE_CASE_ = res[1].strip()
if sec_titles == "":
SCREAMING_SNAKE_CASE_ = '''[{}]({})'''.format(res[0], wiki_url)
else:
SCREAMING_SNAKE_CASE_ = sec_titles.split(''' & ''')
SCREAMING_SNAKE_CASE_ = ''' & '''.join(
['''[{}]({}#{})'''.format(sec.strip(), wiki_url, sec.strip().replace(''' ''', '''_''')) for sec in sec_list]
)
st.markdown(
'''{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'''.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'''> <span style="font-family:arial; font-size:10pt;">''' + res[-1] + '''</span>''', unsafe_allow_html=True
)
if action in [2, 3]:
SCREAMING_SNAKE_CASE_ = find_nearest_training(question)
SCREAMING_SNAKE_CASE_ = nn_train_list[0]
st.markdown(
'''--- \n ### The most similar question in the ELI5 training set was: \n\n {}'''.format(train_exple['''title'''])
)
SCREAMING_SNAKE_CASE_ = [
'''{}. {}'''.format(i + 1, ''' \n'''.join([line.strip() for line in ans.split('''\n''') if line.strip() != '''''']))
for i, (ans, sc) in enumerate(zip(train_exple['''answers''']['''text'''], train_exple['''answers''']['''score''']))
if i == 0 or sc > 2
]
st.markdown('''##### Its answers were: \n\n {}'''.format('''\n'''.join(answers_st)))
SCREAMING_SNAKE_CASE_ = '''
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
'''
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
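# Illustrative sketch (not part of the app): the "mixed" branch above interleaves dense and
# sparse retrieval hits while dropping duplicates and keeping only the first ten. The helper
# below expresses that merge with ordinary names on plain tuples; it is a standalone
# demonstration rather than code taken from the app itself.
def _merge_supports(dense, sparse, limit=10):
    merged = []
    for d, s in zip(dense, sparse):
        for hit in (tuple(d), tuple(s)):
            if hit not in merged:
                merged.append(hit)
    return merged[:limit]


assert _merge_supports([("a", 1), ("b", 2)], [("a", 1), ("c", 3)]) == [("a", 1), ("b", 2), ("c", 3)]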
| 301
|
"""simple docstring"""
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
def lowercase ():
# Get the sagemaker specific mp parameters from smp_options variable.
__lowerCAmelCase = os.getenv("""SM_HP_MP_PARAMETERS""" , """{}""" )
try:
# Parse it and check the field "partitions" is included, it is required for model parallel.
__lowerCAmelCase = json.loads(_lowerCAmelCase )
if "partitions" not in smp_options:
return False
except json.JSONDecodeError:
return False
# Get the sagemaker specific framework parameters from mpi_options variable.
__lowerCAmelCase = os.getenv("""SM_FRAMEWORK_PARAMS""" , """{}""" )
try:
# Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
__lowerCAmelCase = json.loads(_lowerCAmelCase )
if not mpi_options.get("""sagemaker_mpi_enabled""" , _lowerCAmelCase ):
return False
except json.JSONDecodeError:
return False
# Lastly, check if the `smdistributed` module is present.
return importlib.util.find_spec("""smdistributed""" ) is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class lowerCAmelCase_ ( A__ ):
'''simple docstring'''
_snake_case = field(
default='''''' , metadata={'''help''': '''Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer'''} , )
def A__ ( self ) -> Tuple:
super().__post_init__()
warnings.warn(
"""`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use """
"""`TrainingArguments` instead.""" , snake_case_ , )
@cached_property
def A__ ( self ) -> "torch.device":
logger.info("""PyTorch: setting up devices""" )
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
"""torch.distributed process group is initialized, but local_rank == -1. """
"""In order to use Torch DDP, launch your script with `python -m torch.distributed.launch""" )
if self.no_cuda:
__lowerCAmelCase = torch.device("""cpu""" )
__lowerCAmelCase = 0
elif is_sagemaker_model_parallel_available():
__lowerCAmelCase = smp.local_rank()
__lowerCAmelCase = torch.device("""cuda""" , snake_case_ )
__lowerCAmelCase = 1
elif is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
torch.distributed.init_process_group(backend="""smddp""" , timeout=self.ddp_timeout_delta )
__lowerCAmelCase = int(os.getenv("""SMDATAPARALLEL_LOCAL_RANK""" ) )
__lowerCAmelCase = torch.device("""cuda""" , self.local_rank )
__lowerCAmelCase = 1
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
__lowerCAmelCase = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
# Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
# the default value.
__lowerCAmelCase = torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend="""nccl""" , timeout=self.ddp_timeout_delta )
__lowerCAmelCase = torch.device("""cuda""" , self.local_rank )
__lowerCAmelCase = 1
if device.type == "cuda":
torch.cuda.set_device(snake_case_ )
return device
@property
def A__ ( self ) -> Dict:
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
def A__ ( self ) -> Optional[int]:
return not is_sagemaker_model_parallel_available()
@property
def A__ ( self ) -> Tuple:
return False
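# Illustrative sketch (not part of the original module): the availability check above reads the
# SM_HP_MP_PARAMETERS environment variable, parses it as JSON and looks for a "partitions"
# field, falling back to False on malformed input. The standalone helper below reproduces that
# defensive parsing pattern; the environment variable name comes from the code above, the rest
# is for demonstration only.
import json
import os


def _has_partitions(env_var: str = "SM_HP_MP_PARAMETERS") -> bool:
    raw = os.getenv(env_var, "{}")
    try:
        options = json.loads(raw)
    except json.JSONDecodeError:
        return False
    return "partitions" in options


os.environ["SM_HP_MP_PARAMETERS"] = '{"partitions": 2}'
assert _has_partitions() is True
os.environ["SM_HP_MP_PARAMETERS"] = "not json"
assert _has_partitions() is False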
| 301
| 1
|
"""simple docstring"""
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) # pylint: disable=invalid-name
def lowercase (_lowerCAmelCase ):
warnings.warn(
"""The preprocess method is deprecated and will be removed in a future version. Please"""
""" use VaeImageProcessor.preprocess instead""" , _lowerCAmelCase , )
if isinstance(_lowerCAmelCase , torch.Tensor ):
return image
elif isinstance(_lowerCAmelCase , PIL.Image.Image ):
__lowerCAmelCase = [image]
if isinstance(image[0] , PIL.Image.Image ):
__lowerCAmelCase , __lowerCAmelCase = image[0].size
__lowerCAmelCase , __lowerCAmelCase = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
__lowerCAmelCase = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["""lanczos"""] ) )[None, :] for i in image]
__lowerCAmelCase = np.concatenate(_lowerCAmelCase , axis=0 )
__lowerCAmelCase = np.array(_lowerCAmelCase ).astype(np.floataa ) / 255.0
__lowerCAmelCase = image.transpose(0 , 3 , 1 , 2 )
__lowerCAmelCase = 2.0 * image - 1.0
__lowerCAmelCase = torch.from_numpy(_lowerCAmelCase )
elif isinstance(image[0] , torch.Tensor ):
__lowerCAmelCase = torch.cat(_lowerCAmelCase , dim=0 )
return image
def lowercase (_lowerCAmelCase ):
if isinstance(_lowerCAmelCase , torch.Tensor ):
return mask
elif isinstance(_lowerCAmelCase , PIL.Image.Image ):
__lowerCAmelCase = [mask]
if isinstance(mask[0] , PIL.Image.Image ):
__lowerCAmelCase , __lowerCAmelCase = mask[0].size
__lowerCAmelCase , __lowerCAmelCase = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
__lowerCAmelCase = [np.array(m.convert("""L""" ).resize((w, h) , resample=PIL_INTERPOLATION["""nearest"""] ) )[None, :] for m in mask]
__lowerCAmelCase = np.concatenate(_lowerCAmelCase , axis=0 )
__lowerCAmelCase = mask.astype(np.floataa ) / 255.0
__lowerCAmelCase = 0
__lowerCAmelCase = 1
__lowerCAmelCase = torch.from_numpy(_lowerCAmelCase )
elif isinstance(mask[0] , torch.Tensor ):
__lowerCAmelCase = torch.cat(_lowerCAmelCase , dim=0 )
return mask
class lowerCAmelCase_ ( A__ ):
'''simple docstring'''
_snake_case = 42
_snake_case = 42
def __init__( self , snake_case_ , snake_case_ ) -> Tuple:
super().__init__()
self.register_modules(unet=snake_case_ , scheduler=snake_case_ )
@torch.no_grad()
def __call__( self , snake_case_ , snake_case_ , snake_case_ = 250 , snake_case_ = 0.0 , snake_case_ = 10 , snake_case_ = 10 , snake_case_ = None , snake_case_ = "pil" , snake_case_ = True , ) -> Union[ImagePipelineOutput, Tuple]:
__lowerCAmelCase = image
__lowerCAmelCase = _preprocess_image(snake_case_ )
__lowerCAmelCase = original_image.to(device=self.device , dtype=self.unet.dtype )
__lowerCAmelCase = _preprocess_mask(snake_case_ )
__lowerCAmelCase = mask_image.to(device=self.device , dtype=self.unet.dtype )
__lowerCAmelCase = original_image.shape[0]
# sample gaussian noise to begin the loop
if isinstance(snake_case_ , snake_case_ ) and len(snake_case_ ) != batch_size:
raise ValueError(
f"""You have passed a list of generators of length {len(snake_case_ )}, but requested an effective batch"""
f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
__lowerCAmelCase = original_image.shape
__lowerCAmelCase = randn_tensor(snake_case_ , generator=snake_case_ , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(snake_case_ , snake_case_ , snake_case_ , self.device )
__lowerCAmelCase = eta
__lowerCAmelCase = self.scheduler.timesteps[0] + 1
__lowerCAmelCase = generator[0] if isinstance(snake_case_ , snake_case_ ) else generator
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
if t < t_last:
# predict the noise residual
__lowerCAmelCase = self.unet(snake_case_ , snake_case_ ).sample
# compute previous image: x_t -> x_t-1
__lowerCAmelCase = self.scheduler.step(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ).prev_sample
else:
# compute the reverse: x_t-1 -> x_t
__lowerCAmelCase = self.scheduler.undo_step(snake_case_ , snake_case_ , snake_case_ )
__lowerCAmelCase = t
__lowerCAmelCase = (image / 2 + 0.5).clamp(0 , 1 )
__lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__lowerCAmelCase = self.numpy_to_pil(snake_case_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=snake_case_ )
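# Illustrative sketch (not part of the pipeline file): _preprocess_image above converts PIL
# images into an NCHW float tensor rescaled from [0, 255] to [-1, 1]. The NumPy-only helper
# below shows that same normalisation on a fake HWC uint8 image; the shapes and names are
# assumptions made purely for demonstration.
import numpy as np


def _to_model_range(image_uint8: np.ndarray) -> np.ndarray:
    # HWC uint8 in [0, 255] -> 1CHW float32 in [-1, 1]
    image = image_uint8.astype(np.float32) / 255.0
    image = image.transpose(2, 0, 1)[None, ...]
    return 2.0 * image - 1.0


_fake = np.zeros((64, 64, 3), dtype=np.uint8)
_out = _to_model_range(_fake)
assert _out.shape == (1, 3, 64, 64) and _out.min() == -1.0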
| 301
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE_ = {
'''configuration_roberta''': ['''ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RobertaConfig''', '''RobertaOnnxConfig'''],
'''tokenization_roberta''': ['''RobertaTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = ['''RobertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
'''ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RobertaForCausalLM''',
'''RobertaForMaskedLM''',
'''RobertaForMultipleChoice''',
'''RobertaForQuestionAnswering''',
'''RobertaForSequenceClassification''',
'''RobertaForTokenClassification''',
'''RobertaModel''',
'''RobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
'''TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRobertaForCausalLM''',
'''TFRobertaForMaskedLM''',
'''TFRobertaForMultipleChoice''',
'''TFRobertaForQuestionAnswering''',
'''TFRobertaForSequenceClassification''',
'''TFRobertaForTokenClassification''',
'''TFRobertaMainLayer''',
'''TFRobertaModel''',
'''TFRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
'''FlaxRobertaForCausalLM''',
'''FlaxRobertaForMaskedLM''',
'''FlaxRobertaForMultipleChoice''',
'''FlaxRobertaForQuestionAnswering''',
'''FlaxRobertaForSequenceClassification''',
'''FlaxRobertaForTokenClassification''',
'''FlaxRobertaModel''',
'''FlaxRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
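# Illustrative sketch (not part of the original file): _LazyModule above defers the heavy
# framework imports until an attribute is actually requested. The miniature stand-in below
# shows the deferred-import idea with importlib; it is a simplified demonstration, not the
# Transformers implementation, and the class and attribute names are invented for the example.
import importlib


class _LazyAttr:
    def __init__(self, module_name: str, attr_name: str):
        self._module_name = module_name
        self._attr_name = attr_name
        self._value = None

    def get(self):
        # the real import happens only on first access
        if self._value is None:
            module = importlib.import_module(self._module_name)
            self._value = getattr(module, self._attr_name)
        return self._value


_lazy_sqrt = _LazyAttr("math", "sqrt")
assert _lazy_sqrt.get()(9) == 3.0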
| 301
| 1
|
"""simple docstring"""
from math import pi, sqrt
def lowercase (_lowerCAmelCase ):
if num <= 0:
raise ValueError("""math domain error""" )
if num > 171.5:
raise OverflowError("""math range error""" )
elif num - int(_lowerCAmelCase ) not in (0, 0.5):
raise NotImplementedError("""num must be an integer or a half-integer""" )
elif num == 0.5:
return sqrt(_lowerCAmelCase )
else:
return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
def lowercase ():
assert gamma(0.5 ) == sqrt(_lowerCAmelCase )
assert gamma(1 ) == 1.0
assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
SCREAMING_SNAKE_CASE_ = 1.0
while num:
SCREAMING_SNAKE_CASE_ = float(input('''Gamma of: '''))
print(F"gamma({num}) = {gamma(num)}")
print('''\nEnter 0 to exit...''')
| 301
|
"""simple docstring"""
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
SCREAMING_SNAKE_CASE_ = pytest.mark.integration
@pytest.mark.parametrize("""path""" , ["""paws""", """csv"""] )
def lowercase (_lowerCAmelCase , _lowerCAmelCase ):
inspect_dataset(_lowerCAmelCase , _lowerCAmelCase )
__lowerCAmelCase = path + """.py"""
assert script_name in os.listdir(_lowerCAmelCase )
assert "__pycache__" not in os.listdir(_lowerCAmelCase )
@pytest.mark.filterwarnings("""ignore:inspect_metric is deprecated:FutureWarning""" )
@pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" )
@pytest.mark.parametrize("""path""" , ["""accuracy"""] )
def lowercase (_lowerCAmelCase , _lowerCAmelCase ):
inspect_metric(_lowerCAmelCase , _lowerCAmelCase )
__lowerCAmelCase = path + """.py"""
assert script_name in os.listdir(_lowerCAmelCase )
assert "__pycache__" not in os.listdir(_lowerCAmelCase )
@pytest.mark.parametrize(
"""path, config_name, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = get_dataset_config_info(_lowerCAmelCase , config_name=_lowerCAmelCase )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
with pytest.raises(_lowerCAmelCase ):
get_dataset_config_info(_lowerCAmelCase , config_name=_lowerCAmelCase )
@pytest.mark.parametrize(
"""path, expected""" , [
("""squad""", """plain_text"""),
("""acronym_identification""", """default"""),
("""lhoestq/squad""", """plain_text"""),
("""lhoestq/test""", """default"""),
("""lhoestq/demo1""", """lhoestq--demo1"""),
("""dalle-mini/wit""", """dalle-mini--wit"""),
] , )
def lowercase (_lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = get_dataset_config_names(_lowerCAmelCase )
assert expected in config_names
@pytest.mark.parametrize(
"""path, expected_configs, expected_splits_in_first_config""" , [
("""squad""", ["""plain_text"""], ["""train""", """validation"""]),
("""dalle-mini/wit""", ["""dalle-mini--wit"""], ["""train"""]),
("""paws""", ["""labeled_final""", """labeled_swap""", """unlabeled_final"""], ["""train""", """test""", """validation"""]),
] , )
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = get_dataset_infos(_lowerCAmelCase )
assert list(infos.keys() ) == expected_configs
__lowerCAmelCase = expected_configs[0]
assert expected_config in infos
__lowerCAmelCase = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
"""path, expected_config, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = get_dataset_infos(_lowerCAmelCase )
assert expected_config in infos
__lowerCAmelCase = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
with pytest.raises(_lowerCAmelCase ):
get_dataset_split_names(_lowerCAmelCase , config_name=_lowerCAmelCase )
| 301
| 1
|
"""simple docstring"""
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
SCREAMING_SNAKE_CASE_ = '''.'''
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
SCREAMING_SNAKE_CASE_ = [
'''Assert''',
'''AssignVariableOp''',
'''EmptyTensorList''',
'''MergeV2Checkpoints''',
'''ReadVariableOp''',
'''ResourceGather''',
'''RestoreV2''',
'''SaveV2''',
'''ShardedFilename''',
'''StatefulPartitionedCall''',
'''StaticRegexFullMatch''',
'''VarHandleOp''',
]
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = SavedModel()
__lowerCAmelCase = []
with open(os.path.join(_lowerCAmelCase , """utils""" , """tf_ops""" , """onnx.json""" ) ) as f:
__lowerCAmelCase = json.load(_lowerCAmelCase )["""opsets"""]
for i in range(1 , opset + 1 ):
onnx_ops.extend(onnx_opsets[str(_lowerCAmelCase )] )
with open(_lowerCAmelCase , """rb""" ) as f:
saved_model.ParseFromString(f.read() )
__lowerCAmelCase = set()
# Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
for meta_graph in saved_model.meta_graphs:
# Add operations in the graph definition
model_op_names.update(node.op for node in meta_graph.graph_def.node )
# Go through the functions in the graph definition
for func in meta_graph.graph_def.library.function:
# Add operations in each function
model_op_names.update(node.op for node in func.node_def )
# Convert to list, sorted if you want
__lowerCAmelCase = sorted(_lowerCAmelCase )
__lowerCAmelCase = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
incompatible_ops.append(_lowerCAmelCase )
if strict and len(_lowerCAmelCase ) > 0:
raise Exception(f"""Found the following incompatible ops for the opset {opset}:\n""" + incompatible_ops )
elif len(_lowerCAmelCase ) > 0:
print(f"""Found the following incompatible ops for the opset {opset}:""" )
print(*_lowerCAmelCase , sep="""\n""" )
else:
print(f"""The saved model {saved_model_path} can properly be converted with ONNX.""" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
parser.add_argument('''--saved_model_path''', help='''Path of the saved model to check (the .pb file).''')
parser.add_argument(
'''--opset''', default=12, type=int, help='''The ONNX opset against which the model has to be tested.'''
)
parser.add_argument(
'''--framework''', choices=['''onnx'''], default='''onnx''', help='''Frameworks against which to test the saved model.'''
)
parser.add_argument(
'''--strict''', action='''store_true''', help='''Whether make the checking strict (raise errors) or not (raise warnings)'''
)
SCREAMING_SNAKE_CASE_ = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
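# Illustrative sketch (not part of the original script): once the op names are collected from
# the SavedModel graphs, the compatibility check is essentially a set difference against the
# ONNX opset plus the whitelist of internal ops. The helper below shows that core comparison
# with ordinary names on toy data; the op names used here are examples only.
def _find_incompatible(model_ops, onnx_ops, internal_ops):
    return sorted(set(model_ops) - set(onnx_ops) - set(internal_ops))


assert _find_incompatible(
    ["MatMul", "Assert", "MyCustomOp"], ["MatMul", "Relu"], ["Assert"]
) == ["MyCustomOp"]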
| 301
|
"""simple docstring"""
def lowercase (_lowerCAmelCase = 100_0000 ):
__lowerCAmelCase = 1
__lowerCAmelCase = 1
__lowerCAmelCase = {1: 1}
for inputa in range(2 , _lowerCAmelCase ):
__lowerCAmelCase = 0
__lowerCAmelCase = inputa
while True:
if number in counters:
counter += counters[number]
break
if number % 2 == 0:
number //= 2
counter += 1
else:
__lowerCAmelCase = (3 * number) + 1
counter += 1
if inputa not in counters:
__lowerCAmelCase = counter
if counter > pre_counter:
__lowerCAmelCase = inputa
__lowerCAmelCase = counter
return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
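# Illustrative sketch (not part of the original file): the solver above memoises Collatz chain
# lengths while searching for the longest chain below a limit. The compact version below uses
# ordinary names and functools caching, and checks the textbook example that the chain starting
# at 13 has 10 terms; it is a demonstration, not the original function.
from functools import lru_cache


@lru_cache(maxsize=None)
def _collatz_length(n: int) -> int:
    if n == 1:
        return 1
    return 1 + _collatz_length(n // 2 if n % 2 == 0 else 3 * n + 1)


assert _collatz_length(13) == 10  # 13 -> 40 -> 20 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1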
| 301
| 1
|
"""simple docstring"""
from __future__ import annotations
SCREAMING_SNAKE_CASE_ = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_ ) -> None:
__lowerCAmelCase = graph
# mapping node to its parent in resulting breadth first tree
__lowerCAmelCase = {}
__lowerCAmelCase = source_vertex
def A__ ( self ) -> None:
__lowerCAmelCase = {self.source_vertex}
__lowerCAmelCase = None
__lowerCAmelCase = [self.source_vertex] # first in first out queue
while queue:
__lowerCAmelCase = queue.pop(0 )
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(snake_case_ )
__lowerCAmelCase = vertex
queue.append(snake_case_ )
def A__ ( self , snake_case_ ) -> str:
if target_vertex == self.source_vertex:
return self.source_vertex
__lowerCAmelCase = self.parent.get(snake_case_ )
if target_vertex_parent is None:
__lowerCAmelCase = (
f"""No path from vertex: {self.source_vertex} to vertex: {target_vertex}"""
)
raise ValueError(snake_case_ )
return self.shortest_path(snake_case_ ) + f"""->{target_vertex}"""
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = Graph(graph, '''G''')
g.breath_first_search()
print(g.shortest_path('''D'''))
print(g.shortest_path('''G'''))
print(g.shortest_path('''Foo'''))
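# Illustrative sketch (not part of the original file): breadth-first search from the source
# records each vertex's parent, and the shortest path is recovered by walking parents back to
# the source. The compact deque-based version below uses ordinary names and its own small
# adjacency list, chosen only for demonstration.
from collections import deque


def _bfs_path(graph, source, target):
    parents = {source: None}
    queue = deque([source])
    while queue:
        vertex = queue.popleft()
        for neighbour in graph[vertex]:
            if neighbour not in parents:
                parents[neighbour] = vertex
                queue.append(neighbour)
    path, node = [], target
    while node is not None:
        path.append(node)
        node = parents[node]
    return "->".join(reversed(path))


_graph = {"A": ["B", "C"], "B": ["A", "D"], "C": ["A"], "D": ["B"]}
assert _bfs_path(_graph, "A", "D") == "A->B->D"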
| 301
|
"""simple docstring"""
import sys
import turtle
def lowercase (_lowerCAmelCase , _lowerCAmelCase ):
return (pa[0] + pa[0]) / 2, (pa[1] + pa[1]) / 2
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ):
my_pen.up()
my_pen.goto(vertexa[0] , vertexa[1] )
my_pen.down()
my_pen.goto(vertexa[0] , vertexa[1] )
my_pen.goto(vertexa[0] , vertexa[1] )
my_pen.goto(vertexa[0] , vertexa[1] )
if depth == 0:
return
triangle(_lowerCAmelCase , get_mid(_lowerCAmelCase , _lowerCAmelCase ) , get_mid(_lowerCAmelCase , _lowerCAmelCase ) , depth - 1 )
triangle(_lowerCAmelCase , get_mid(_lowerCAmelCase , _lowerCAmelCase ) , get_mid(_lowerCAmelCase , _lowerCAmelCase ) , depth - 1 )
triangle(_lowerCAmelCase , get_mid(_lowerCAmelCase , _lowerCAmelCase ) , get_mid(_lowerCAmelCase , _lowerCAmelCase ) , depth - 1 )
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'''Correct format for using this script: '''
'''python fractals.py <int:depth_for_fractal>'''
)
SCREAMING_SNAKE_CASE_ = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor('''red''')
SCREAMING_SNAKE_CASE_ = [(-175, -125), (0, 175), (175, -125)] # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 301
| 1
|
"""simple docstring"""
from __future__ import annotations
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self , snake_case_ ) -> Any:
__lowerCAmelCase = TypeError(
"""Matrices must be formed from a list of zero or more lists containing at """
"""least one and the same number of values, each of which must be of type """
"""int or float.""" )
if len(snake_case_ ) != 0:
__lowerCAmelCase = len(rows[0] )
if cols == 0:
raise error
for row in rows:
if len(snake_case_ ) != cols:
raise error
for value in row:
if not isinstance(snake_case_ , (int, float) ):
raise error
__lowerCAmelCase = rows
else:
__lowerCAmelCase = []
def A__ ( self ) -> list[list[int]]:
return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )]
@property
def A__ ( self ) -> int:
return len(self.rows )
@property
def A__ ( self ) -> int:
return len(self.rows[0] )
@property
def A__ ( self ) -> tuple[int, int]:
return (self.num_rows, self.num_columns)
@property
def A__ ( self ) -> bool:
return self.order[0] == self.order[1]
def A__ ( self ) -> Matrix:
__lowerCAmelCase = [
[0 if column_num != row_num else 1 for column_num in range(self.num_rows )]
for row_num in range(self.num_rows )
]
return Matrix(snake_case_ )
def A__ ( self ) -> int:
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0] )
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]) )
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns ) )
def A__ ( self ) -> bool:
return bool(self.determinant() )
def A__ ( self , snake_case_ , snake_case_ ) -> int:
__lowerCAmelCase = [
[
self.rows[other_row][other_column]
for other_column in range(self.num_columns )
if other_column != column
]
for other_row in range(self.num_rows )
if other_row != row
]
return Matrix(snake_case_ ).determinant()
def A__ ( self , snake_case_ , snake_case_ ) -> int:
if (row + column) % 2 == 0:
return self.get_minor(snake_case_ , snake_case_ )
return -1 * self.get_minor(snake_case_ , snake_case_ )
def A__ ( self ) -> Matrix:
return Matrix(
[
[self.get_minor(snake_case_ , snake_case_ ) for column in range(self.num_columns )]
for row in range(self.num_rows )
] )
def A__ ( self ) -> Matrix:
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns )
]
for row in range(self.minors().num_rows )
] )
def A__ ( self ) -> Matrix:
__lowerCAmelCase = [
[self.cofactors().rows[column][row] for column in range(self.num_columns )]
for row in range(self.num_rows )
]
return Matrix(snake_case_ )
def A__ ( self ) -> Matrix:
__lowerCAmelCase = self.determinant()
if not determinant:
raise TypeError("""Only matrices with a non-zero determinant have an inverse""" )
return self.adjugate() * (1 / determinant)
def __repr__( self ) -> str:
return str(self.rows )
def __str__( self ) -> str:
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
return (
"["
+ "\n ".join(
[
"""[""" + """. """.join([str(snake_case_ ) for value in row] ) + """.]"""
for row in self.rows
] )
+ "]"
)
def A__ ( self , snake_case_ , snake_case_ = None ) -> None:
__lowerCAmelCase = TypeError("""Row must be a list containing all ints and/or floats""" )
if not isinstance(snake_case_ , snake_case_ ):
raise type_error
for value in row:
if not isinstance(snake_case_ , (int, float) ):
raise type_error
if len(snake_case_ ) != self.num_columns:
raise ValueError(
"""Row must be equal in length to the other rows in the matrix""" )
if position is None:
self.rows.append(snake_case_ )
else:
__lowerCAmelCase = self.rows[0:position] + [row] + self.rows[position:]
def A__ ( self , snake_case_ , snake_case_ = None ) -> None:
__lowerCAmelCase = TypeError(
"""Column must be a list containing all ints and/or floats""" )
if not isinstance(snake_case_ , snake_case_ ):
raise type_error
for value in column:
if not isinstance(snake_case_ , (int, float) ):
raise type_error
if len(snake_case_ ) != self.num_rows:
raise ValueError(
"""Column must be equal in length to the other columns in the matrix""" )
if position is None:
__lowerCAmelCase = [self.rows[i] + [column[i]] for i in range(self.num_rows )]
else:
__lowerCAmelCase = [
self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
for i in range(self.num_rows )
]
def __eq__( self , snake_case_ ) -> bool:
if not isinstance(snake_case_ , snake_case_ ):
return NotImplemented
return self.rows == other.rows
def __ne__( self , snake_case_ ) -> bool:
return not self == other
def __neg__( self ) -> Matrix:
return self * -1
def __add__( self , snake_case_ ) -> Matrix:
if self.order != other.order:
raise ValueError("""Addition requires matrices of the same order""" )
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __sub__( self , snake_case_ ) -> Matrix:
if self.order != other.order:
raise ValueError("""Subtraction requires matrices of the same order""" )
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __mul__( self , snake_case_ ) -> Matrix:
if isinstance(snake_case_ , (int, float) ):
return Matrix(
[[int(element * other ) for element in row] for row in self.rows] )
elif isinstance(snake_case_ , snake_case_ ):
if self.num_columns != other.num_rows:
raise ValueError(
"""The number of columns in the first matrix must """
"""be equal to the number of rows in the second""" )
return Matrix(
[
[Matrix.dot_product(snake_case_ , snake_case_ ) for column in other.columns()]
for row in self.rows
] )
else:
raise TypeError(
"""A Matrix can only be multiplied by an int, float, or another matrix""" )
def __pow__( self , snake_case_ ) -> Matrix:
if not isinstance(snake_case_ , snake_case_ ):
raise TypeError("""A Matrix can only be raised to the power of an int""" )
if not self.is_square:
raise ValueError("""Only square matrices can be raised to a power""" )
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
"""Only invertable matrices can be raised to a negative power""" )
__lowerCAmelCase = self
for _ in range(other - 1 ):
result *= self
return result
@classmethod
def A__ ( cls , snake_case_ , snake_case_ ) -> int:
return sum(row[i] * column[i] for i in range(len(snake_case_ ) ) )
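# A minimal usage sketch (hypothetical values; assuming the constructor takes the list of
# rows, as the factory helpers above do):
#   m = Matrix([[1, 2], [3, 4]])
#   m.determinant()   # (1 * 4) - (2 * 3) == -2, per the 2x2 branch above
#   m ** 0            # the identity matrix of the same order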
if __name__ == "__main__":
import doctest
doctest.testmod()
| 301
|
"""simple docstring"""
def lowercase (_lowerCAmelCase ):
__lowerCAmelCase = [[0 for _ in range(_lowerCAmelCase )] for _ in range(m + 1 )]
for i in range(m + 1 ):
__lowerCAmelCase = 1
for n in range(m + 1 ):
for k in range(1 , _lowerCAmelCase ):
memo[n][k] += memo[n][k - 1]
if n - k > 0:
memo[n][k] += memo[n - k - 1][k]
return memo[m][m - 1]
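# memo[n][k] counts the partitions of n into parts of size at most k + 1, so the intended
# return value is p(m), the number of integer partitions of m. For example p(5) == 7:
# 5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1 and 1+1+1+1+1.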
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
SCREAMING_SNAKE_CASE_ = int(input('''Enter a number: ''').strip())
print(partition(n))
except ValueError:
print('''Please enter a number.''')
else:
try:
SCREAMING_SNAKE_CASE_ = int(sys.argv[1])
print(partition(n))
except ValueError:
print('''Please pass a number.''')
| 301
| 1
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class lowerCAmelCase_ ( A__ , unittest.TestCase ):
'''simple docstring'''
_snake_case = ShapEImgaImgPipeline
_snake_case = ['''image''']
_snake_case = ['''image''']
_snake_case = [
'''num_images_per_prompt''',
'''num_inference_steps''',
'''generator''',
'''latents''',
'''guidance_scale''',
'''frame_size''',
'''output_type''',
'''return_dict''',
]
_snake_case = False
@property
def A__ ( self ) -> Optional[int]:
return 32
@property
def A__ ( self ) -> Dict:
return 32
@property
def A__ ( self ) -> Optional[Any]:
return self.time_input_dim * 4
@property
def A__ ( self ) -> Optional[Any]:
return 8
@property
def A__ ( self ) -> Tuple:
torch.manual_seed(0 )
__lowerCAmelCase = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
__lowerCAmelCase = CLIPVisionModel(snake_case_ )
return model
@property
def A__ ( self ) -> Tuple:
__lowerCAmelCase = CLIPImageProcessor(
crop_size=224 , do_center_crop=snake_case_ , do_normalize=snake_case_ , do_resize=snake_case_ , image_mean=[0.48_145_466, 0.4_578_275, 0.40_821_073] , image_std=[0.26_862_954, 0.26_130_258, 0.27_577_711] , resample=3 , size=224 , )
return image_processor
@property
def A__ ( self ) -> str:
torch.manual_seed(0 )
__lowerCAmelCase = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 16,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 32,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""embedding_proj_norm_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
__lowerCAmelCase = PriorTransformer(**snake_case_ )
return model
@property
def A__ ( self ) -> Union[str, Any]:
torch.manual_seed(0 )
__lowerCAmelCase = {
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
__lowerCAmelCase = ShapERenderer(**snake_case_ )
return model
def A__ ( self ) -> Tuple:
__lowerCAmelCase = self.dummy_prior
__lowerCAmelCase = self.dummy_image_encoder
__lowerCAmelCase = self.dummy_image_processor
__lowerCAmelCase = self.dummy_renderer
__lowerCAmelCase = HeunDiscreteScheduler(
beta_schedule="""exp""" , num_train_timesteps=1_024 , prediction_type="""sample""" , use_karras_sigmas=snake_case_ , clip_sample=snake_case_ , clip_sample_range=1.0 , )
__lowerCAmelCase = {
"""prior""": prior,
"""image_encoder""": image_encoder,
"""image_processor""": image_processor,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
def A__ ( self , snake_case_ , snake_case_=0 ) -> List[Any]:
__lowerCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(snake_case_ ) ).to(snake_case_ )
if str(snake_case_ ).startswith("""mps""" ):
__lowerCAmelCase = torch.manual_seed(snake_case_ )
else:
__lowerCAmelCase = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ )
__lowerCAmelCase = {
"""image""": input_image,
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 32,
"""output_type""": """np""",
}
return inputs
def A__ ( self ) -> str:
__lowerCAmelCase = """cpu"""
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = self.pipeline_class(**snake_case_ )
__lowerCAmelCase = pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
__lowerCAmelCase = pipe(**self.get_dummy_inputs(snake_case_ ) )
__lowerCAmelCase = output.images[0]
__lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__lowerCAmelCase = np.array(
[
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A__ ( self ) -> Optional[Any]:
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def A__ ( self ) -> Optional[int]:
__lowerCAmelCase = torch_device == """cpu"""
__lowerCAmelCase = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=snake_case_ , relax_max_difference=snake_case_ , )
def A__ ( self ) -> Dict:
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = self.pipeline_class(**snake_case_ )
__lowerCAmelCase = pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
__lowerCAmelCase = 1
__lowerCAmelCase = 2
__lowerCAmelCase = self.get_dummy_inputs(snake_case_ )
for key in inputs.keys():
if key in self.batch_params:
__lowerCAmelCase = batch_size * [inputs[key]]
__lowerCAmelCase = pipe(**snake_case_ , num_images_per_prompt=snake_case_ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self ) -> List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ) -> Any:
__lowerCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/shap_e/corgi.png""" )
__lowerCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/shap_e/test_shap_e_img2img_out.npy""" )
__lowerCAmelCase = ShapEImgaImgPipeline.from_pretrained("""openai/shap-e-img2img""" )
__lowerCAmelCase = pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
__lowerCAmelCase = torch.Generator(device=snake_case_ ).manual_seed(0 )
__lowerCAmelCase = pipe(
snake_case_ , generator=snake_case_ , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(snake_case_ , snake_case_ )
| 301
|
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
SCREAMING_SNAKE_CASE_ = '''bart'''
SCREAMING_SNAKE_CASE_ = True
@st.cache(allow_output_mutation=_lowerCAmelCase )
def lowercase ():
if LOAD_DENSE_INDEX:
__lowerCAmelCase = AutoTokenizer.from_pretrained("""yjernite/retribert-base-uncased""" )
__lowerCAmelCase = AutoModel.from_pretrained("""yjernite/retribert-base-uncased""" ).to("""cuda:0""" )
__lowerCAmelCase = qar_model.eval()
else:
__lowerCAmelCase , __lowerCAmelCase = (None, None)
if MODEL_TYPE == "bart":
__lowerCAmelCase = AutoTokenizer.from_pretrained("""yjernite/bart_eli5""" )
__lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained("""yjernite/bart_eli5""" ).to("""cuda:0""" )
__lowerCAmelCase = torch.load("""seq2seq_models/eli5_bart_model_blm_2.pth""" )
sas_model.load_state_dict(save_dict["""model"""] )
__lowerCAmelCase = sas_model.eval()
else:
__lowerCAmelCase , __lowerCAmelCase = make_qa_sas_model(
model_name="""t5-small""" , from_file="""seq2seq_models/eli5_t5_model_1024_4.pth""" , device="""cuda:0""" )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=_lowerCAmelCase )
def lowercase ():
if LOAD_DENSE_INDEX:
__lowerCAmelCase = faiss.StandardGpuResources()
__lowerCAmelCase = datasets.load_dataset(path="""wiki_snippets""" , name="""wiki40b_en_100_0""" )["""train"""]
__lowerCAmelCase = np.memmap(
"""wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat""" , dtype="""float32""" , mode="""r""" , shape=(wikiaab_passages.num_rows, 128) , )
__lowerCAmelCase = faiss.IndexFlatIP(128 )
__lowerCAmelCase = faiss.index_cpu_to_gpu(_lowerCAmelCase , 1 , _lowerCAmelCase )
wikiaab_gpu_index_flat.add(_lowerCAmelCase ) # TODO fix for larger GPU
else:
__lowerCAmelCase , __lowerCAmelCase = (None, None)
__lowerCAmelCase = Elasticsearch([{"""host""": """localhost""", """port""": """9200"""}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=_lowerCAmelCase )
def lowercase ():
__lowerCAmelCase = datasets.load_dataset("""eli5""" , name="""LFQA_reddit""" )
__lowerCAmelCase = elia["""train_eli5"""]
__lowerCAmelCase = np.memmap(
"""eli5_questions_reps.dat""" , dtype="""float32""" , mode="""r""" , shape=(elia_train.num_rows, 128) )
__lowerCAmelCase = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(_lowerCAmelCase )
return (elia_train, eli5_train_q_index)
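# Note: faiss.IndexFlatIP performs exact (non-approximate) maximum inner-product search, and the
# 128-dimensional passage / question embeddings are numpy memmaps, so they are read lazily from
# disk rather than loaded into RAM up front.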
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = load_indexes()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = load_models()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = load_train_data()
def lowercase (_lowerCAmelCase , _lowerCAmelCase=10 ):
__lowerCAmelCase = embed_questions_for_retrieval([question] , _lowerCAmelCase , _lowerCAmelCase )
__lowerCAmelCase , __lowerCAmelCase = eli5_train_q_index.search(_lowerCAmelCase , _lowerCAmelCase )
    __lowerCAmelCase = [elia_train[int(i )] for i in I[0]]
return nn_examples
def lowercase (_lowerCAmelCase , _lowerCAmelCase="wiki40b" , _lowerCAmelCase="dense" , _lowerCAmelCase=10 ):
if source == "none":
__lowerCAmelCase , __lowerCAmelCase = (""" <P> """.join(["""""" for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
__lowerCAmelCase , __lowerCAmelCase = query_qa_dense_index(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
else:
__lowerCAmelCase , __lowerCAmelCase = query_es_index(
_lowerCAmelCase , _lowerCAmelCase , index_name="""english_wiki40b_snippets_100w""" , n_results=_lowerCAmelCase , )
__lowerCAmelCase = [
(res["""article_title"""], res["""section_title"""].strip(), res["""score"""], res["""passage_text"""]) for res in hit_lst
]
__lowerCAmelCase = """question: {} context: {}""".format(_lowerCAmelCase , _lowerCAmelCase )
return question_doc, support_list
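# make_support returns the single "question: ... context: ..." string that is fed to the
# seq2seq answer generator, together with the list of supporting passages it was built from.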
@st.cache(
hash_funcs={
torch.Tensor: (lambda _lowerCAmelCase : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _lowerCAmelCase : None),
} )
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=64 , _lowerCAmelCase=256 , _lowerCAmelCase=False , _lowerCAmelCase=2 , _lowerCAmelCase=0.95 , _lowerCAmelCase=0.8 ):
with torch.no_grad():
__lowerCAmelCase = qa_sas_generate(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , num_answers=1 , num_beams=_lowerCAmelCase , min_len=_lowerCAmelCase , max_len=_lowerCAmelCase , do_sample=_lowerCAmelCase , temp=_lowerCAmelCase , top_p=_lowerCAmelCase , top_k=_lowerCAmelCase , max_input_length=1024 , device="""cuda:0""" , )[0]
return (answer, support_list)
st.title('''Long Form Question Answering with ELI5''')
# Start sidebar
SCREAMING_SNAKE_CASE_ = '''<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'''
SCREAMING_SNAKE_CASE_ = '''
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class="img-container"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
''' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
SCREAMING_SNAKE_CASE_ = '''
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
'''
st.sidebar.markdown(description, unsafe_allow_html=True)
SCREAMING_SNAKE_CASE_ = [
'''Answer the question''',
'''View the retrieved document only''',
'''View the most similar ELI5 question and answer''',
'''Show me everything, please!''',
]
SCREAMING_SNAKE_CASE_ = st.sidebar.checkbox('''Demo options''')
if demo_options:
SCREAMING_SNAKE_CASE_ = st.sidebar.selectbox(
'''''',
action_list,
index=3,
)
SCREAMING_SNAKE_CASE_ = action_list.index(action_st)
SCREAMING_SNAKE_CASE_ = st.sidebar.selectbox(
'''''',
['''Show full text of passages''', '''Show passage section titles'''],
index=0,
)
SCREAMING_SNAKE_CASE_ = show_type == '''Show full text of passages'''
else:
SCREAMING_SNAKE_CASE_ = 3
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = st.sidebar.checkbox('''Retrieval options''')
if retrieval_options:
SCREAMING_SNAKE_CASE_ = '''
### Information retriever options
The **sparse** retriever uses Elasticsearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) question-answer pairs.
The answer is then generated by a sequence-to-sequence model which takes the question and retrieved document as input.
'''
st.sidebar.markdown(retriever_info)
SCREAMING_SNAKE_CASE_ = st.sidebar.selectbox('''Which Wikipedia format should the model use?''', ['''wiki40b''', '''none'''])
SCREAMING_SNAKE_CASE_ = st.sidebar.selectbox('''Which Wikipedia indexer should the model use?''', ['''dense''', '''sparse''', '''mixed'''])
else:
SCREAMING_SNAKE_CASE_ = '''wiki40b'''
SCREAMING_SNAKE_CASE_ = '''dense'''
SCREAMING_SNAKE_CASE_ = '''beam'''
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = 64
SCREAMING_SNAKE_CASE_ = 256
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = st.sidebar.checkbox('''Generation options''')
if generate_options:
SCREAMING_SNAKE_CASE_ = '''
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can decode the answer with
**beam** search, or **sample** from the decoder\'s output probabilities.
'''
st.sidebar.markdown(generate_info)
SCREAMING_SNAKE_CASE_ = st.sidebar.selectbox('''Would you like to use beam search or sample an answer?''', ['''beam''', '''sampled'''])
SCREAMING_SNAKE_CASE_ = st.sidebar.slider(
'''Minimum generation length''', min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
SCREAMING_SNAKE_CASE_ = st.sidebar.slider(
'''Maximum generation length''', min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
SCREAMING_SNAKE_CASE_ = st.sidebar.slider('''Beam size''', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
SCREAMING_SNAKE_CASE_ = st.sidebar.slider(
'''Nucleus sampling p''', min_value=0.1, max_value=1.0, value=0.9_5, step=0.0_1, format=None, key=None
)
SCREAMING_SNAKE_CASE_ = st.sidebar.slider(
'''Temperature''', min_value=0.1, max_value=1.0, value=0.7, step=0.0_1, format=None, key=None
)
SCREAMING_SNAKE_CASE_ = None
# start main text
SCREAMING_SNAKE_CASE_ = [
'''<MY QUESTION>''',
'''How do people make chocolate?''',
'''Why do we get a fever when we are sick?''',
'''How can different animals perceive different colors?''',
'''What is natural language processing?''',
'''What\'s the best way to treat a sunburn?''',
'''What exactly are vitamins ?''',
'''How does nuclear energy provide electricity?''',
'''What\'s the difference between viruses and bacteria?''',
'''Why are flutes classified as woodwinds when most of them are made out of metal ?''',
'''Why do people like drinking coffee even though it tastes so bad?''',
'''What happens when wine ages? How does it make the wine taste better?''',
'''If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?''',
'''How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?''',
'''How does New Zealand have so many large bird predators?''',
]
SCREAMING_SNAKE_CASE_ = st.selectbox(
'''What would you like to ask? ---- select <MY QUESTION> to enter a new query''',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
SCREAMING_SNAKE_CASE_ = st.text_input('''Enter your question here:''', '''''')
else:
SCREAMING_SNAKE_CASE_ = question_s
if st.button('''Show me!'''):
if action in [0, 1, 3]:
if index_type == "mixed":
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = make_support(question, source=wiki_source, method='''dense''', n_results=10)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = make_support(question, source=wiki_source, method='''sparse''', n_results=10)
SCREAMING_SNAKE_CASE_ = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
SCREAMING_SNAKE_CASE_ = support_list[:10]
SCREAMING_SNAKE_CASE_ = '''<P> ''' + ''' <P> '''.join([res[-1] for res in support_list])
else:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == '''sampled'''),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('''### The model generated answer is:''')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('''--- \n ### The model is drawing information from the following Wikipedia passages:''')
for i, res in enumerate(support_list):
SCREAMING_SNAKE_CASE_ = '''https://en.wikipedia.org/wiki/{}'''.format(res[0].replace(''' ''', '''_'''))
SCREAMING_SNAKE_CASE_ = res[1].strip()
if sec_titles == "":
SCREAMING_SNAKE_CASE_ = '''[{}]({})'''.format(res[0], wiki_url)
else:
SCREAMING_SNAKE_CASE_ = sec_titles.split(''' & ''')
SCREAMING_SNAKE_CASE_ = ''' & '''.join(
['''[{}]({}#{})'''.format(sec.strip(), wiki_url, sec.strip().replace(''' ''', '''_''')) for sec in sec_list]
)
st.markdown(
'''{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'''.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'''> <span style="font-family:arial; font-size:10pt;">''' + res[-1] + '''</span>''', unsafe_allow_html=True
)
if action in [2, 3]:
SCREAMING_SNAKE_CASE_ = find_nearest_training(question)
SCREAMING_SNAKE_CASE_ = nn_train_list[0]
st.markdown(
'''--- \n ### The most similar question in the ELI5 training set was: \n\n {}'''.format(train_exple['''title'''])
)
SCREAMING_SNAKE_CASE_ = [
'''{}. {}'''.format(i + 1, ''' \n'''.join([line.strip() for line in ans.split('''\n''') if line.strip() != '''''']))
for i, (ans, sc) in enumerate(zip(train_exple['''answers''']['''text'''], train_exple['''answers''']['''score''']))
if i == 0 or sc > 2
]
st.markdown('''##### Its answers were: \n\n {}'''.format('''\n'''.join(answers_st)))
SCREAMING_SNAKE_CASE_ = '''
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
'''
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 301
| 1
|
"""simple docstring"""
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
SCREAMING_SNAKE_CASE_ = pytest.mark.integration
@pytest.mark.parametrize("""path""" , ["""paws""", """csv"""] )
def lowercase (_lowerCAmelCase , _lowerCAmelCase ):
inspect_dataset(_lowerCAmelCase , _lowerCAmelCase )
__lowerCAmelCase = path + """.py"""
assert script_name in os.listdir(_lowerCAmelCase )
assert "__pycache__" not in os.listdir(_lowerCAmelCase )
@pytest.mark.filterwarnings("""ignore:inspect_metric is deprecated:FutureWarning""" )
@pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" )
@pytest.mark.parametrize("""path""" , ["""accuracy"""] )
def lowercase (_lowerCAmelCase , _lowerCAmelCase ):
inspect_metric(_lowerCAmelCase , _lowerCAmelCase )
__lowerCAmelCase = path + """.py"""
assert script_name in os.listdir(_lowerCAmelCase )
assert "__pycache__" not in os.listdir(_lowerCAmelCase )
@pytest.mark.parametrize(
"""path, config_name, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = get_dataset_config_info(_lowerCAmelCase , config_name=_lowerCAmelCase )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
with pytest.raises(_lowerCAmelCase ):
get_dataset_config_info(_lowerCAmelCase , config_name=_lowerCAmelCase )
@pytest.mark.parametrize(
"""path, expected""" , [
("""squad""", """plain_text"""),
("""acronym_identification""", """default"""),
("""lhoestq/squad""", """plain_text"""),
("""lhoestq/test""", """default"""),
("""lhoestq/demo1""", """lhoestq--demo1"""),
("""dalle-mini/wit""", """dalle-mini--wit"""),
] , )
def lowercase (_lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = get_dataset_config_names(_lowerCAmelCase )
assert expected in config_names
@pytest.mark.parametrize(
"""path, expected_configs, expected_splits_in_first_config""" , [
("""squad""", ["""plain_text"""], ["""train""", """validation"""]),
("""dalle-mini/wit""", ["""dalle-mini--wit"""], ["""train"""]),
("""paws""", ["""labeled_final""", """labeled_swap""", """unlabeled_final"""], ["""train""", """test""", """validation"""]),
] , )
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = get_dataset_infos(_lowerCAmelCase )
assert list(infos.keys() ) == expected_configs
__lowerCAmelCase = expected_configs[0]
assert expected_config in infos
__lowerCAmelCase = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
"""path, expected_config, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = get_dataset_infos(_lowerCAmelCase )
assert expected_config in infos
__lowerCAmelCase = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
with pytest.raises(_lowerCAmelCase ):
get_dataset_split_names(_lowerCAmelCase , config_name=_lowerCAmelCase )
| 301
|
"""simple docstring"""
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
SCREAMING_SNAKE_CASE_ = getLogger(__name__)
SCREAMING_SNAKE_CASE_ = '''cuda''' if torch.cuda.is_available() else '''cpu'''
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 8 , _lowerCAmelCase = DEFAULT_DEVICE , _lowerCAmelCase=False , _lowerCAmelCase="summarization" , _lowerCAmelCase=None , **_lowerCAmelCase , ):
__lowerCAmelCase = Path(_lowerCAmelCase ).open("""w""" , encoding="""utf-8""" )
__lowerCAmelCase = str(_lowerCAmelCase )
__lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(_lowerCAmelCase ).to(_lowerCAmelCase )
if fpaa:
__lowerCAmelCase = model.half()
__lowerCAmelCase = AutoTokenizer.from_pretrained(_lowerCAmelCase )
logger.info(f"""Inferred tokenizer type: {tokenizer.__class__}""" ) # if this is wrong, check config.model_type.
__lowerCAmelCase = time.time()
# update config with task specific params
use_task_specific_params(_lowerCAmelCase , _lowerCAmelCase )
if prefix is None:
__lowerCAmelCase = prefix or getattr(model.config , """prefix""" , """""" ) or """"""
for examples_chunk in tqdm(list(chunks(_lowerCAmelCase , _lowerCAmelCase ) ) ):
__lowerCAmelCase = [prefix + text for text in examples_chunk]
__lowerCAmelCase = tokenizer(_lowerCAmelCase , return_tensors="""pt""" , truncation=_lowerCAmelCase , padding="""longest""" ).to(_lowerCAmelCase )
__lowerCAmelCase = model.generate(
input_ids=batch.input_ids , attention_mask=batch.attention_mask , **_lowerCAmelCase , )
__lowerCAmelCase = tokenizer.batch_decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase , clean_up_tokenization_spaces=_lowerCAmelCase )
for hypothesis in dec:
fout.write(hypothesis + """\n""" )
fout.flush()
fout.close()
__lowerCAmelCase = int(time.time() - start_time ) # seconds
__lowerCAmelCase = len(_lowerCAmelCase )
return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )}
def lowercase ():
return datetime.datetime.now().strftime("""%Y-%m-%d %H:%M:%S""" )
def lowercase (_lowerCAmelCase=True ):
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("""model_name""" , type=_lowerCAmelCase , help="""like facebook/bart-large-cnn,t5-base, etc.""" )
parser.add_argument("""input_path""" , type=_lowerCAmelCase , help="""like cnn_dm/test.source""" )
parser.add_argument("""save_path""" , type=_lowerCAmelCase , help="""where to save summaries""" )
parser.add_argument("""--reference_path""" , type=_lowerCAmelCase , required=_lowerCAmelCase , help="""like cnn_dm/test.target""" )
parser.add_argument("""--score_path""" , type=_lowerCAmelCase , required=_lowerCAmelCase , default="""metrics.json""" , help="""where to save metrics""" )
parser.add_argument("""--device""" , type=_lowerCAmelCase , required=_lowerCAmelCase , default=_lowerCAmelCase , help="""cuda, cuda:1, cpu etc.""" )
parser.add_argument(
"""--prefix""" , type=_lowerCAmelCase , required=_lowerCAmelCase , default=_lowerCAmelCase , help="""will be added to the begininng of src examples""" )
parser.add_argument("""--task""" , type=_lowerCAmelCase , default="""summarization""" , help="""used for task_specific_params + metrics""" )
parser.add_argument("""--bs""" , type=_lowerCAmelCase , default=8 , required=_lowerCAmelCase , help="""batch size""" )
parser.add_argument(
"""--n_obs""" , type=_lowerCAmelCase , default=-1 , required=_lowerCAmelCase , help="""How many observations. Defaults to all.""" )
parser.add_argument("""--fp16""" , action="""store_true""" )
parser.add_argument("""--dump-args""" , action="""store_true""" , help="""print the custom hparams with the results""" )
parser.add_argument(
"""--info""" , nargs="""?""" , type=_lowerCAmelCase , const=datetime_now() , help=(
"""use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."""
""" lang=en-ru. If no value is passed, the current datetime string will be used."""
) , )
# Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
__lowerCAmelCase , __lowerCAmelCase = parser.parse_known_args()
__lowerCAmelCase = parse_numeric_n_bool_cl_kwargs(_lowerCAmelCase )
if parsed_args and verbose:
print(f"""parsed the following generate kwargs: {parsed_args}""" )
__lowerCAmelCase = [""" """ + x.rstrip() if """t5""" in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
if args.n_obs > 0:
__lowerCAmelCase = examples[: args.n_obs]
Path(args.save_path ).parent.mkdir(exist_ok=_lowerCAmelCase )
if args.reference_path is None and Path(args.score_path ).exists():
warnings.warn(f"""score_path {args.score_path} will be overwritten unless you type ctrl-c.""" )
if args.device == "cpu" and args.fpaa:
# this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
raise ValueError("""Can't mix --fp16 and --device cpu""" )
__lowerCAmelCase = generate_summaries_or_translations(
_lowerCAmelCase , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fpaa , task=args.task , prefix=args.prefix , **_lowerCAmelCase , )
if args.reference_path is None:
return {}
# Compute scores
__lowerCAmelCase = calculate_bleu if """translation""" in args.task else calculate_rouge
__lowerCAmelCase = [x.rstrip() for x in open(args.save_path ).readlines()]
__lowerCAmelCase = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(_lowerCAmelCase )]
__lowerCAmelCase = score_fn(_lowerCAmelCase , _lowerCAmelCase )
scores.update(_lowerCAmelCase )
if args.dump_args:
scores.update(_lowerCAmelCase )
if args.info:
__lowerCAmelCase = args.info
if verbose:
print(_lowerCAmelCase )
if args.score_path is not None:
json.dump(_lowerCAmelCase , open(args.score_path , """w""" ) )
return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
| 301
| 1
|
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
SCREAMING_SNAKE_CASE_ = '''pt'''
elif is_tf_available():
SCREAMING_SNAKE_CASE_ = '''tf'''
else:
SCREAMING_SNAKE_CASE_ = '''jax'''
class lowerCAmelCase_ ( A__ , unittest.TestCase ):
'''simple docstring'''
_snake_case = PerceiverTokenizer
_snake_case = False
def A__ ( self ) -> Optional[Any]:
super().setUp()
__lowerCAmelCase = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def A__ ( self ) -> Tuple:
return PerceiverTokenizer.from_pretrained("""deepmind/language-perceiver""" )
def A__ ( self , **snake_case_ ) -> PerceiverTokenizer:
return self.tokenizer_class.from_pretrained(self.tmpdirname , **snake_case_ )
def A__ ( self , snake_case_ , snake_case_=False , snake_case_=20 , snake_case_=5 ) -> Tuple[str, list]:
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for Perceiver because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
__lowerCAmelCase = []
for i in range(len(snake_case_ ) ):
try:
__lowerCAmelCase = tokenizer.decode([i] , clean_up_tokenization_spaces=snake_case_ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
        __lowerCAmelCase = list(filter(lambda t : re.match(r"""^[ a-zA-Z]+$""" , t[1] ) , snake_case_ ) )
        __lowerCAmelCase = list(filter(lambda t : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=snake_case_ ) , snake_case_ ) )
if max_length is not None and len(snake_case_ ) > max_length:
__lowerCAmelCase = toks[:max_length]
if min_length is not None and len(snake_case_ ) < min_length and len(snake_case_ ) > 0:
while len(snake_case_ ) < min_length:
__lowerCAmelCase = toks + toks
# toks_str = [t[1] for t in toks]
__lowerCAmelCase = [t[0] for t in toks]
# Ensure consistency
__lowerCAmelCase = tokenizer.decode(snake_case_ , clean_up_tokenization_spaces=snake_case_ )
if " " not in output_txt and len(snake_case_ ) > 1:
__lowerCAmelCase = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=snake_case_ )
+ """ """
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=snake_case_ )
)
if with_prefix_space:
__lowerCAmelCase = """ """ + output_txt
__lowerCAmelCase = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
return output_txt, output_ids
def A__ ( self ) -> List[Any]:
__lowerCAmelCase = self.perceiver_tokenizer
__lowerCAmelCase = """Unicode €."""
__lowerCAmelCase = tokenizer(snake_case_ )
__lowerCAmelCase = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
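        # These ids follow Perceiver's byte-level scheme: each utf-8 byte is shifted by the
        # 6 special tokens (e.g. ord("U") + 6 == 91 and "€" -> 232, 136, 178), with
        # [CLS] (4) prepended and [SEP] (5) appended.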
self.assertEqual(encoded["""input_ids"""] , snake_case_ )
# decoding
__lowerCAmelCase = tokenizer.decode(snake_case_ )
self.assertEqual(snake_case_ , """[CLS]Unicode €.[SEP]""" )
__lowerCAmelCase = tokenizer("""e è é ê ë""" )
__lowerCAmelCase = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
self.assertEqual(encoded["""input_ids"""] , snake_case_ )
# decoding
__lowerCAmelCase = tokenizer.decode(snake_case_ )
self.assertEqual(snake_case_ , """[CLS]e è é ê ë[SEP]""" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("""e è é ê ë""" ) ) , """[CLS]e è é ê ë[SEP]""" )
def A__ ( self ) -> Any:
__lowerCAmelCase = self.perceiver_tokenizer
__lowerCAmelCase = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
# fmt: off
__lowerCAmelCase = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
# fmt: on
__lowerCAmelCase = tokenizer(snake_case_ , padding=snake_case_ , return_tensors=snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
if FRAMEWORK != "jax":
__lowerCAmelCase = list(batch.input_ids.numpy()[0] )
else:
__lowerCAmelCase = list(batch.input_ids.tolist()[0] )
self.assertListEqual(snake_case_ , snake_case_ )
self.assertEqual((2, 38) , batch.input_ids.shape )
self.assertEqual((2, 38) , batch.attention_mask.shape )
def A__ ( self ) -> Any:
__lowerCAmelCase = self.perceiver_tokenizer
__lowerCAmelCase = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
__lowerCAmelCase = tokenizer(snake_case_ , padding=snake_case_ , return_tensors=snake_case_ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("""input_ids""" , snake_case_ )
self.assertIn("""attention_mask""" , snake_case_ )
self.assertNotIn("""decoder_input_ids""" , snake_case_ )
self.assertNotIn("""decoder_attention_mask""" , snake_case_ )
def A__ ( self ) -> int:
__lowerCAmelCase = self.perceiver_tokenizer
__lowerCAmelCase = [
"""Summary of the text.""",
"""Another summary.""",
]
__lowerCAmelCase = tokenizer(
text_target=snake_case_ , max_length=32 , padding="""max_length""" , truncation=snake_case_ , return_tensors=snake_case_ )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
def A__ ( self ) -> int:
# safety check on max_len default value so we are sure the test works
__lowerCAmelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
__lowerCAmelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
__lowerCAmelCase = tempfile.mkdtemp()
__lowerCAmelCase = """ He is very happy, UNwant\u00E9d,running"""
__lowerCAmelCase = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
tokenizer.save_pretrained(snake_case_ )
__lowerCAmelCase = tokenizer.__class__.from_pretrained(snake_case_ )
__lowerCAmelCase = after_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
shutil.rmtree(snake_case_ )
__lowerCAmelCase = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
__lowerCAmelCase = tempfile.mkdtemp()
__lowerCAmelCase = """ He is very happy, UNwant\u00E9d,running"""
tokenizer.add_tokens(["""bim""", """bambam"""] )
__lowerCAmelCase = tokenizer.additional_special_tokens
additional_special_tokens.append("""new_additional_special_token""" )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
__lowerCAmelCase = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
tokenizer.save_pretrained(snake_case_ )
__lowerCAmelCase = tokenizer.__class__.from_pretrained(snake_case_ )
__lowerCAmelCase = after_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
self.assertIn("""new_additional_special_token""" , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
__lowerCAmelCase = tokenizer.__class__.from_pretrained(snake_case_ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(snake_case_ )
def A__ ( self ) -> Any:
__lowerCAmelCase = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(snake_case_ )
with open(os.path.join(snake_case_ , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file:
__lowerCAmelCase = json.load(snake_case_ )
with open(os.path.join(snake_case_ , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file:
__lowerCAmelCase = json.load(snake_case_ )
__lowerCAmelCase = [f"""<extra_id_{i}>""" for i in range(125 )]
__lowerCAmelCase = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
__lowerCAmelCase = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
with open(os.path.join(snake_case_ , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(snake_case_ , snake_case_ )
with open(os.path.join(snake_case_ , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(snake_case_ , snake_case_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__lowerCAmelCase = tokenizer_class.from_pretrained(
snake_case_ , )
self.assertIn(
"""an_additional_special_token""" , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
["""an_additional_special_token"""] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["""an_additional_special_token"""] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__lowerCAmelCase = added_tokens_extra_ids + [AddedToken("""a_new_additional_special_token""" , lstrip=snake_case_ )]
__lowerCAmelCase = tokenizer_class.from_pretrained(
snake_case_ , additional_special_tokens=snake_case_ , )
self.assertIn("""a_new_additional_special_token""" , tokenizer.additional_special_tokens )
self.assertEqual(
["""a_new_additional_special_token"""] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["""a_new_additional_special_token"""] ) ) , )
def A__ ( self ) -> Tuple:
__lowerCAmelCase = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([178] ) , """�""" )
def A__ ( self ) -> str:
pass
def A__ ( self ) -> List[Any]:
pass
def A__ ( self ) -> Union[str, Any]:
pass
def A__ ( self ) -> Any:
pass
def A__ ( self ) -> List[str]:
        # The default common tokenizer tests use invalid tokens for Perceiver, which can only accept one-character
# strings and special added tokens as tokens
__lowerCAmelCase = self.get_tokenizers(fast=snake_case_ , do_lower_case=snake_case_ )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
__lowerCAmelCase = ["""[CLS]""", """t""", """h""", """i""", """s""", """ """, """i""", """s""", """ """, """a""", """ """, """t""", """e""", """s""", """t""", """[SEP]"""]
__lowerCAmelCase = tokenizer.convert_tokens_to_string(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
| 301
|
"""simple docstring"""
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def lowercase (_lowerCAmelCase , _lowerCAmelCase="shi-labs/oneformer_demo" ):
with open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) as f:
__lowerCAmelCase = json.load(_lowerCAmelCase )
__lowerCAmelCase = {}
__lowerCAmelCase = []
__lowerCAmelCase = []
for key, info in class_info.items():
__lowerCAmelCase = info["""name"""]
class_names.append(info["""name"""] )
if info["isthing"]:
thing_ids.append(int(_lowerCAmelCase ) )
__lowerCAmelCase = thing_ids
__lowerCAmelCase = class_names
return metadata
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_=7 , snake_case_=3 , snake_case_=30 , snake_case_=400 , snake_case_=None , snake_case_=True , snake_case_=True , snake_case_=[0.5, 0.5, 0.5] , snake_case_=[0.5, 0.5, 0.5] , snake_case_=10 , snake_case_=False , snake_case_=255 , snake_case_="shi-labs/oneformer_demo" , snake_case_="ade20k_panoptic.json" , snake_case_=10 , ) -> Union[str, Any]:
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = min_resolution
__lowerCAmelCase = max_resolution
__lowerCAmelCase = do_resize
__lowerCAmelCase = {"""shortest_edge""": 32, """longest_edge""": 1_333} if size is None else size
__lowerCAmelCase = do_normalize
__lowerCAmelCase = image_mean
__lowerCAmelCase = image_std
__lowerCAmelCase = class_info_file
__lowerCAmelCase = prepare_metadata(snake_case_ , snake_case_ )
__lowerCAmelCase = num_text
__lowerCAmelCase = repo_path
# for the post_process_functions
__lowerCAmelCase = 2
__lowerCAmelCase = 10
__lowerCAmelCase = 10
__lowerCAmelCase = 3
__lowerCAmelCase = 4
__lowerCAmelCase = num_labels
__lowerCAmelCase = do_reduce_labels
__lowerCAmelCase = ignore_index
def A__ ( self ) -> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def A__ ( self , snake_case_ , snake_case_=False ) -> Dict:
if not batched:
__lowerCAmelCase = image_inputs[0]
if isinstance(snake_case_ , Image.Image ):
__lowerCAmelCase , __lowerCAmelCase = image.size
else:
__lowerCAmelCase , __lowerCAmelCase = image.shape[1], image.shape[2]
if w < h:
__lowerCAmelCase = int(self.size["""shortest_edge"""] * h / w )
__lowerCAmelCase = self.size["""shortest_edge"""]
elif w > h:
__lowerCAmelCase = self.size["""shortest_edge"""]
__lowerCAmelCase = int(self.size["""shortest_edge"""] * w / h )
else:
__lowerCAmelCase = self.size["""shortest_edge"""]
__lowerCAmelCase = self.size["""shortest_edge"""]
else:
__lowerCAmelCase = []
for image in image_inputs:
__lowerCAmelCase , __lowerCAmelCase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
            __lowerCAmelCase = max(snake_case_ , key=lambda item : item[0] )[0]
            __lowerCAmelCase = max(snake_case_ , key=lambda item : item[1] )[1]
return expected_height, expected_width
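        # Example: with shortest_edge == 32, an image of height 30 and width 400 takes the
        # w > h branch above, so (expected_height, expected_width) == (32, int(32 * 400 / 30)) == (32, 426).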
def A__ ( self ) -> Tuple:
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class lowerCAmelCase_ ( A__ , unittest.TestCase ):
'''simple docstring'''
_snake_case = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
_snake_case = image_processing_class
def A__ ( self ) -> str:
__lowerCAmelCase = OneFormerImageProcessorTester(self )
@property
def A__ ( self ) -> Dict:
return self.image_processing_tester.prepare_image_processor_dict()
def A__ ( self ) -> Union[str, Any]:
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case_ , """image_mean""" ) )
self.assertTrue(hasattr(snake_case_ , """image_std""" ) )
self.assertTrue(hasattr(snake_case_ , """do_normalize""" ) )
self.assertTrue(hasattr(snake_case_ , """do_resize""" ) )
self.assertTrue(hasattr(snake_case_ , """size""" ) )
self.assertTrue(hasattr(snake_case_ , """ignore_index""" ) )
self.assertTrue(hasattr(snake_case_ , """class_info_file""" ) )
self.assertTrue(hasattr(snake_case_ , """num_text""" ) )
self.assertTrue(hasattr(snake_case_ , """repo_path""" ) )
self.assertTrue(hasattr(snake_case_ , """metadata""" ) )
self.assertTrue(hasattr(snake_case_ , """do_reduce_labels""" ) )
def A__ ( self ) -> List[str]:
pass
def A__ ( self ) -> Union[str, Any]:
# Initialize image_processor
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCAmelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , Image.Image )
# Test not batched input
__lowerCAmelCase = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
__lowerCAmelCase , __lowerCAmelCase = self.image_processing_tester.get_expected_values(snake_case_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCAmelCase , __lowerCAmelCase = self.image_processing_tester.get_expected_values(snake_case_ , batched=snake_case_ )
__lowerCAmelCase = image_processor(
snake_case_ , ["""semantic"""] * len(snake_case_ ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A__ ( self ) -> List[str]:
# Initialize image_processor
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowerCAmelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case_ , numpify=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , np.ndarray )
# Test not batched input
__lowerCAmelCase = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
__lowerCAmelCase , __lowerCAmelCase = self.image_processing_tester.get_expected_values(snake_case_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCAmelCase , __lowerCAmelCase = self.image_processing_tester.get_expected_values(snake_case_ , batched=snake_case_ )
__lowerCAmelCase = image_processor(
snake_case_ , ["""semantic"""] * len(snake_case_ ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A__ ( self ) -> Tuple:
# Initialize image_processor
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowerCAmelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case_ , torchify=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , torch.Tensor )
# Test not batched input
__lowerCAmelCase = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
__lowerCAmelCase , __lowerCAmelCase = self.image_processing_tester.get_expected_values(snake_case_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCAmelCase , __lowerCAmelCase = self.image_processing_tester.get_expected_values(snake_case_ , batched=snake_case_ )
__lowerCAmelCase = image_processor(
snake_case_ , ["""semantic"""] * len(snake_case_ ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A__ ( self , snake_case_=False , snake_case_=False , snake_case_="np" ) -> Optional[Any]:
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
__lowerCAmelCase = self.image_processing_tester.num_labels
__lowerCAmelCase = None
__lowerCAmelCase = None
__lowerCAmelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case_ )
if with_segmentation_maps:
__lowerCAmelCase = num_labels
if is_instance_map:
__lowerCAmelCase = list(range(snake_case_ ) ) * 2
__lowerCAmelCase = dict(enumerate(snake_case_ ) )
__lowerCAmelCase = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
__lowerCAmelCase = [Image.fromarray(snake_case_ ) for annotation in annotations]
__lowerCAmelCase = image_processor(
snake_case_ , ["""semantic"""] * len(snake_case_ ) , snake_case_ , return_tensors="""pt""" , instance_id_to_semantic_id=snake_case_ , pad_and_return_pixel_mask=snake_case_ , )
return inputs
def A__ ( self ) -> List[str]:
pass
def A__ ( self ) -> Optional[Any]:
def common(snake_case_=False , snake_case_=None ):
__lowerCAmelCase = self.comm_get_image_processor_inputs(
with_segmentation_maps=snake_case_ , is_instance_map=snake_case_ , segmentation_type=snake_case_ )
__lowerCAmelCase = inputs["""mask_labels"""]
__lowerCAmelCase = inputs["""class_labels"""]
__lowerCAmelCase = inputs["""pixel_values"""]
__lowerCAmelCase = inputs["""text_inputs"""]
# check the batch_size
for mask_label, class_label, text_input in zip(snake_case_ , snake_case_ , snake_case_ ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
            # this ensures padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(snake_case_ ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=snake_case_ )
common(is_instance_map=snake_case_ , segmentation_type="""pil""" )
common(is_instance_map=snake_case_ , segmentation_type="""pil""" )
def A__ ( self ) -> Optional[int]:
__lowerCAmelCase = np.zeros((20, 50) )
__lowerCAmelCase = 1
__lowerCAmelCase = 1
__lowerCAmelCase = 1
__lowerCAmelCase = binary_mask_to_rle(snake_case_ )
self.assertEqual(len(snake_case_ ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
def A__ ( self ) -> Optional[Any]:
__lowerCAmelCase = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
__lowerCAmelCase = self.image_processing_tester.get_fake_oneformer_outputs()
__lowerCAmelCase = fature_extractor.post_process_semantic_segmentation(snake_case_ )
self.assertEqual(len(snake_case_ ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
__lowerCAmelCase = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
__lowerCAmelCase = fature_extractor.post_process_semantic_segmentation(snake_case_ , target_sizes=snake_case_ )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def A__ ( self ) -> Union[str, Any]:
__lowerCAmelCase = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
__lowerCAmelCase = self.image_processing_tester.get_fake_oneformer_outputs()
__lowerCAmelCase = image_processor.post_process_instance_segmentation(snake_case_ , threshold=0 )
self.assertTrue(len(snake_case_ ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("""segmentation""" in el )
self.assertTrue("""segments_info""" in el )
self.assertEqual(type(el["""segments_info"""] ) , snake_case_ )
self.assertEqual(
el["""segmentation"""].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def A__ ( self ) -> Union[str, Any]:
__lowerCAmelCase = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
__lowerCAmelCase = self.image_processing_tester.get_fake_oneformer_outputs()
__lowerCAmelCase = image_processor.post_process_panoptic_segmentation(snake_case_ , threshold=0 )
self.assertTrue(len(snake_case_ ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("""segmentation""" in el )
self.assertTrue("""segments_info""" in el )
self.assertEqual(type(el["""segments_info"""] ) , snake_case_ )
self.assertEqual(
el["""segmentation"""].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
| 301
| 1
|
"""simple docstring"""
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
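    # arithmetic series: S_n = (n / 2) * (2 * a_1 + (n - 1) * d), where a_1 is the first term,
    # d the common difference and n the number of terms; e.g. 1 + 2 + ... + 10 = (10/2)*(2 + 9) = 55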
__lowerCAmelCase = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
# formula for sum of series
return total
def lowercase ():
print(sum_of_series(1 , 1 , 10 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 301
|
"""simple docstring"""
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Base model mapping
('''albert''', '''FlaxAlbertModel'''),
('''bart''', '''FlaxBartModel'''),
('''beit''', '''FlaxBeitModel'''),
('''bert''', '''FlaxBertModel'''),
('''big_bird''', '''FlaxBigBirdModel'''),
('''blenderbot''', '''FlaxBlenderbotModel'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallModel'''),
('''clip''', '''FlaxCLIPModel'''),
('''distilbert''', '''FlaxDistilBertModel'''),
('''electra''', '''FlaxElectraModel'''),
('''gpt-sw3''', '''FlaxGPT2Model'''),
('''gpt2''', '''FlaxGPT2Model'''),
('''gpt_neo''', '''FlaxGPTNeoModel'''),
('''gptj''', '''FlaxGPTJModel'''),
('''longt5''', '''FlaxLongT5Model'''),
('''marian''', '''FlaxMarianModel'''),
('''mbart''', '''FlaxMBartModel'''),
('''mt5''', '''FlaxMT5Model'''),
('''opt''', '''FlaxOPTModel'''),
('''pegasus''', '''FlaxPegasusModel'''),
('''regnet''', '''FlaxRegNetModel'''),
('''resnet''', '''FlaxResNetModel'''),
('''roberta''', '''FlaxRobertaModel'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormModel'''),
('''roformer''', '''FlaxRoFormerModel'''),
('''t5''', '''FlaxT5Model'''),
('''vision-text-dual-encoder''', '''FlaxVisionTextDualEncoderModel'''),
('''vit''', '''FlaxViTModel'''),
('''wav2vec2''', '''FlaxWav2Vec2Model'''),
('''whisper''', '''FlaxWhisperModel'''),
('''xglm''', '''FlaxXGLMModel'''),
('''xlm-roberta''', '''FlaxXLMRobertaModel'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for pre-training mapping
('''albert''', '''FlaxAlbertForPreTraining'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForPreTraining'''),
('''big_bird''', '''FlaxBigBirdForPreTraining'''),
('''electra''', '''FlaxElectraForPreTraining'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
('''wav2vec2''', '''FlaxWav2Vec2ForPreTraining'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Masked LM mapping
('''albert''', '''FlaxAlbertForMaskedLM'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForMaskedLM'''),
('''big_bird''', '''FlaxBigBirdForMaskedLM'''),
('''distilbert''', '''FlaxDistilBertForMaskedLM'''),
('''electra''', '''FlaxElectraForMaskedLM'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''blenderbot''', '''FlaxBlenderbotForConditionalGeneration'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallForConditionalGeneration'''),
('''encoder-decoder''', '''FlaxEncoderDecoderModel'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''marian''', '''FlaxMarianMTModel'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''pegasus''', '''FlaxPegasusForConditionalGeneration'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
        # Model for Image-classification
('''beit''', '''FlaxBeitForImageClassification'''),
('''regnet''', '''FlaxRegNetForImageClassification'''),
('''resnet''', '''FlaxResNetForImageClassification'''),
('''vit''', '''FlaxViTForImageClassification'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
('''vision-encoder-decoder''', '''FlaxVisionEncoderDecoderModel'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Causal LM mapping
('''bart''', '''FlaxBartForCausalLM'''),
('''bert''', '''FlaxBertForCausalLM'''),
('''big_bird''', '''FlaxBigBirdForCausalLM'''),
('''electra''', '''FlaxElectraForCausalLM'''),
('''gpt-sw3''', '''FlaxGPT2LMHeadModel'''),
('''gpt2''', '''FlaxGPT2LMHeadModel'''),
('''gpt_neo''', '''FlaxGPTNeoForCausalLM'''),
('''gptj''', '''FlaxGPTJForCausalLM'''),
('''opt''', '''FlaxOPTForCausalLM'''),
('''roberta''', '''FlaxRobertaForCausalLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForCausalLM'''),
('''xglm''', '''FlaxXGLMForCausalLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForCausalLM'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Sequence Classification mapping
('''albert''', '''FlaxAlbertForSequenceClassification'''),
('''bart''', '''FlaxBartForSequenceClassification'''),
('''bert''', '''FlaxBertForSequenceClassification'''),
('''big_bird''', '''FlaxBigBirdForSequenceClassification'''),
('''distilbert''', '''FlaxDistilBertForSequenceClassification'''),
('''electra''', '''FlaxElectraForSequenceClassification'''),
('''mbart''', '''FlaxMBartForSequenceClassification'''),
('''roberta''', '''FlaxRobertaForSequenceClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForSequenceClassification'''),
('''roformer''', '''FlaxRoFormerForSequenceClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForSequenceClassification'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Question Answering mapping
('''albert''', '''FlaxAlbertForQuestionAnswering'''),
('''bart''', '''FlaxBartForQuestionAnswering'''),
('''bert''', '''FlaxBertForQuestionAnswering'''),
('''big_bird''', '''FlaxBigBirdForQuestionAnswering'''),
('''distilbert''', '''FlaxDistilBertForQuestionAnswering'''),
('''electra''', '''FlaxElectraForQuestionAnswering'''),
('''mbart''', '''FlaxMBartForQuestionAnswering'''),
('''roberta''', '''FlaxRobertaForQuestionAnswering'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForQuestionAnswering'''),
('''roformer''', '''FlaxRoFormerForQuestionAnswering'''),
('''xlm-roberta''', '''FlaxXLMRobertaForQuestionAnswering'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Token Classification mapping
('''albert''', '''FlaxAlbertForTokenClassification'''),
('''bert''', '''FlaxBertForTokenClassification'''),
('''big_bird''', '''FlaxBigBirdForTokenClassification'''),
('''distilbert''', '''FlaxDistilBertForTokenClassification'''),
('''electra''', '''FlaxElectraForTokenClassification'''),
('''roberta''', '''FlaxRobertaForTokenClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForTokenClassification'''),
('''roformer''', '''FlaxRoFormerForTokenClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForTokenClassification'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Multiple Choice mapping
('''albert''', '''FlaxAlbertForMultipleChoice'''),
('''bert''', '''FlaxBertForMultipleChoice'''),
('''big_bird''', '''FlaxBigBirdForMultipleChoice'''),
('''distilbert''', '''FlaxDistilBertForMultipleChoice'''),
('''electra''', '''FlaxElectraForMultipleChoice'''),
('''roberta''', '''FlaxRobertaForMultipleChoice'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMultipleChoice'''),
('''roformer''', '''FlaxRoFormerForMultipleChoice'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMultipleChoice'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
('''bert''', '''FlaxBertForNextSentencePrediction'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
('''speech-encoder-decoder''', '''FlaxSpeechEncoderDecoderModel'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
('''whisper''', '''FlaxWhisperForAudioClassification'''),
]
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModel)
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_PRETRAINING_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForPreTraining, head_doc='''pretraining''')
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForCausalLM, head_doc='''causal language modeling''')
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_MASKED_LM_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='''masked language modeling''')
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc='''sequence-to-sequence language modeling''', checkpoint_for_example='''t5-base'''
)
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc='''sequence classification'''
)
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='''question answering''')
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc='''token classification'''
)
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='''multiple choice''')
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc='''next sentence prediction'''
)
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(
FlaxAutoModelForImageClassification, head_doc='''image classification'''
)
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='''vision-to-text modeling''')
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc='''sequence-to-sequence speech-to-text modeling'''
)
| 301
| 1
|
"""simple docstring"""
from math import isqrt, loga
def lowercase (_lowerCAmelCase ):
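    # Sieve of Eratosthenes: strike out the multiples of every prime up to sqrt(max_number),
    # then return the indices that are still marked as prime.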
__lowerCAmelCase = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , _lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = False
return [i for i in range(2 , _lowerCAmelCase ) if is_prime[i]]
def lowercase (_lowerCAmelCase = 80_0800 , _lowerCAmelCase = 80_0800 ):
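    # Count pairs of primes p < q with p**q * q**p <= base**degree by comparing logarithms,
    # i.e. q * log2(p) + p * log2(q) <= degree * log2(base), so the huge powers are never computed.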
__lowerCAmelCase = degree * loga(_lowerCAmelCase )
__lowerCAmelCase = int(_lowerCAmelCase )
__lowerCAmelCase = calculate_prime_numbers(_lowerCAmelCase )
__lowerCAmelCase = 0
__lowerCAmelCase = 0
__lowerCAmelCase = len(_lowerCAmelCase ) - 1
while left < right:
while (
prime_numbers[right] * loga(prime_numbers[left] )
+ prime_numbers[left] * loga(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
if __name__ == "__main__":
print(F"{solution() = }")
| 301
|
"""simple docstring"""
from __future__ import annotations
def lowercase (_lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = []
create_all_state(1 , _lowerCAmelCase , _lowerCAmelCase , [] , _lowerCAmelCase )
return result
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ):
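    # Depth-first backtracking: append each candidate i in turn, recurse to fill the remaining
    # level - 1 slots, then pop the candidate so the next one can be tried.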
if level == 0:
total_list.append(current_list[:] )
return
for i in range(_lowerCAmelCase , total_number - level + 2 ):
current_list.append(_lowerCAmelCase )
create_all_state(i + 1 , _lowerCAmelCase , level - 1 , _lowerCAmelCase , _lowerCAmelCase )
current_list.pop()
def lowercase (_lowerCAmelCase ):
for i in total_list:
print(*_lowerCAmelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = generate_all_combinations(n, k)
print_all_state(total_list)
| 301
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
SCREAMING_SNAKE_CASE_ = {
'''configuration_layoutlmv3''': [
'''LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''LayoutLMv3Config''',
'''LayoutLMv3OnnxConfig''',
],
'''processing_layoutlmv3''': ['''LayoutLMv3Processor'''],
'''tokenization_layoutlmv3''': ['''LayoutLMv3Tokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = ['''LayoutLMv3TokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
'''LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LayoutLMv3ForQuestionAnswering''',
'''LayoutLMv3ForSequenceClassification''',
'''LayoutLMv3ForTokenClassification''',
'''LayoutLMv3Model''',
'''LayoutLMv3PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
'''TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLayoutLMv3ForQuestionAnswering''',
'''TFLayoutLMv3ForSequenceClassification''',
'''TFLayoutLMv3ForTokenClassification''',
'''TFLayoutLMv3Model''',
'''TFLayoutLMv3PreTrainedModel''',
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = ['''LayoutLMv3FeatureExtractor''']
SCREAMING_SNAKE_CASE_ = ['''LayoutLMv3ImageProcessor''']
if TYPE_CHECKING:
from .configuration_layoutlmva import (
LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
LayoutLMvaConfig,
LayoutLMvaOnnxConfig,
)
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_layoutlmva import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
TFLayoutLMvaPreTrainedModel,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
else:
import sys
SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 301
|
"""simple docstring"""
import os
from pathlib import Path
def lowercase ():
from torch.utils.cpp_extension import load
__lowerCAmelCase = Path(_lowerCAmelCase ).resolve().parent.parent.parent / """kernels""" / """deformable_detr"""
__lowerCAmelCase = [
root / filename
for filename in [
"""vision.cpp""",
os.path.join("""cpu""" , """ms_deform_attn_cpu.cpp""" ),
os.path.join("""cuda""" , """ms_deform_attn_cuda.cu""" ),
]
]
load(
"""MultiScaleDeformableAttention""" , _lowerCAmelCase , with_cuda=_lowerCAmelCase , extra_include_paths=[str(_lowerCAmelCase )] , extra_cflags=["""-DWITH_CUDA=1"""] , extra_cuda_cflags=[
"""-DCUDA_HAS_FP16=1""",
"""-D__CUDA_NO_HALF_OPERATORS__""",
"""-D__CUDA_NO_HALF_CONVERSIONS__""",
"""-D__CUDA_NO_HALF2_OPERATORS__""",
] , )
import MultiScaleDeformableAttention as MSDA
return MSDA
| 301
| 1
|
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
SCREAMING_SNAKE_CASE_ = True
except ImportError:
SCREAMING_SNAKE_CASE_ = False
try:
from torch.hub import _get_torch_home
SCREAMING_SNAKE_CASE_ = _get_torch_home()
except ImportError:
SCREAMING_SNAKE_CASE_ = os.path.expanduser(
os.getenv('''TORCH_HOME''', os.path.join(os.getenv('''XDG_CACHE_HOME''', '''~/.cache'''), '''torch'''))
)
SCREAMING_SNAKE_CASE_ = os.path.join(torch_cache_home, '''transformers''')
SCREAMING_SNAKE_CASE_ = '''https://cdn.huggingface.co'''
SCREAMING_SNAKE_CASE_ = '''https://s3.amazonaws.com/models.huggingface.co/bert'''
SCREAMING_SNAKE_CASE_ = '''/'''.join(str(Path(__file__).resolve()).split('''/''')[:-1])
SCREAMING_SNAKE_CASE_ = os.path.join(PATH, '''config.yaml''')
SCREAMING_SNAKE_CASE_ = os.path.join(PATH, '''attributes.txt''')
SCREAMING_SNAKE_CASE_ = os.path.join(PATH, '''objects.txt''')
SCREAMING_SNAKE_CASE_ = os.getenv('''PYTORCH_PRETRAINED_BERT_CACHE''', default_cache_path)
SCREAMING_SNAKE_CASE_ = os.getenv('''PYTORCH_TRANSFORMERS_CACHE''', PYTORCH_PRETRAINED_BERT_CACHE)
SCREAMING_SNAKE_CASE_ = os.getenv('''TRANSFORMERS_CACHE''', PYTORCH_TRANSFORMERS_CACHE)
SCREAMING_SNAKE_CASE_ = '''pytorch_model.bin'''
SCREAMING_SNAKE_CASE_ = '''config.yaml'''
def lowercase (_lowerCAmelCase=OBJECTS , _lowerCAmelCase=ATTRIBUTES ):
__lowerCAmelCase = []
with open(_lowerCAmelCase ) as f:
for object in f.readlines():
vg_classes.append(object.split(""",""" )[0].lower().strip() )
__lowerCAmelCase = []
with open(_lowerCAmelCase ) as f:
for object in f.readlines():
vg_attrs.append(object.split(""",""" )[0].lower().strip() )
return vg_classes, vg_attrs
def lowercase (_lowerCAmelCase ):
__lowerCAmelCase = OrderedDict()
with open(_lowerCAmelCase , """rb""" ) as f:
__lowerCAmelCase = pkl.load(_lowerCAmelCase )["""model"""]
for k in copy.deepcopy(list(ckp.keys() ) ):
__lowerCAmelCase = ckp.pop(_lowerCAmelCase )
if isinstance(_lowerCAmelCase , np.ndarray ):
__lowerCAmelCase = torch.tensor(_lowerCAmelCase )
else:
            assert isinstance(_lowerCAmelCase , torch.Tensor ), type(_lowerCAmelCase )  # torch.Tensor is the class; torch.tensor is a factory function
__lowerCAmelCase = v
return r
class lowerCAmelCase_ :
'''simple docstring'''
_snake_case = {}
def __init__( self , snake_case_ , snake_case_ = "root" , snake_case_=0 ) -> Dict:
__lowerCAmelCase = name
__lowerCAmelCase = level
__lowerCAmelCase = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
__lowerCAmelCase = copy.deepcopy(snake_case_ )
__lowerCAmelCase = copy.deepcopy(snake_case_ )
if isinstance(snake_case_ , snake_case_ ):
__lowerCAmelCase = Config(snake_case_ , name=snake_case_ , level=level + 1 )
__lowerCAmelCase = v
setattr(self , snake_case_ , snake_case_ )
__lowerCAmelCase = d
def __repr__( self ) -> Union[str, Any]:
return str(list((self._pointer.keys()) ) )
def __setattr__( self , snake_case_ , snake_case_ ) -> Optional[int]:
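        # Dotted keys such as "model.hidden_size" are resolved by walking the nested Config
        # pointers level by level and assigning the value at the final leaf.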
__lowerCAmelCase = val
__lowerCAmelCase = val
__lowerCAmelCase = key.split(""".""" )
__lowerCAmelCase = len(snake_case_ ) - 1
__lowerCAmelCase = self._pointer
if len(snake_case_ ) > 1:
for i, l in enumerate(snake_case_ ):
if hasattr(self , snake_case_ ) and isinstance(getattr(self , snake_case_ ) , snake_case_ ):
setattr(getattr(self , snake_case_ ) , """.""".join(levels[i:] ) , snake_case_ )
if l == last_level:
__lowerCAmelCase = val
else:
__lowerCAmelCase = pointer[l]
def A__ ( self ) -> List[str]:
return self._pointer
def A__ ( self , snake_case_ , snake_case_ ) -> List[str]:
with open(f"""{file_name}""" , """w""" ) as stream:
dump(snake_case_ , snake_case_ )
def A__ ( self , snake_case_ , snake_case_ ) -> Dict:
with open(f"""{file_name}""" , """w""" ) as stream:
json.dump(snake_case_ , snake_case_ )
@staticmethod
def A__ ( snake_case_ ) -> str:
with open(snake_case_ ) as stream:
__lowerCAmelCase = load(snake_case_ , Loader=snake_case_ )
return data
def __str__( self ) -> str:
__lowerCAmelCase = """ """
if self._name != "root":
__lowerCAmelCase = f"""{t * (self._level-1)}{self._name}:\n"""
else:
__lowerCAmelCase = """"""
__lowerCAmelCase = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(snake_case_ , snake_case_ ):
r += f"""{t * (self._level)}{v}\n"""
self._level += 1
else:
r += f"""{t * (self._level)}{k}: {v} ({type(snake_case_ ).__name__})\n"""
__lowerCAmelCase = level
return r[:-1]
@classmethod
def A__ ( cls , snake_case_ , **snake_case_ ) -> Any:
__lowerCAmelCase , __lowerCAmelCase = cls.get_config_dict(snake_case_ , **snake_case_ )
return cls(snake_case_ )
@classmethod
def A__ ( cls , snake_case_ , **snake_case_ ) -> str:
__lowerCAmelCase = kwargs.pop("""cache_dir""" , snake_case_ )
__lowerCAmelCase = kwargs.pop("""force_download""" , snake_case_ )
__lowerCAmelCase = kwargs.pop("""resume_download""" , snake_case_ )
__lowerCAmelCase = kwargs.pop("""proxies""" , snake_case_ )
__lowerCAmelCase = kwargs.pop("""local_files_only""" , snake_case_ )
if os.path.isdir(snake_case_ ):
__lowerCAmelCase = os.path.join(snake_case_ , snake_case_ )
elif os.path.isfile(snake_case_ ) or is_remote_url(snake_case_ ):
__lowerCAmelCase = pretrained_model_name_or_path
else:
__lowerCAmelCase = hf_bucket_url(snake_case_ , filename=snake_case_ , use_cdn=snake_case_ )
try:
# Load from URL or cache if already cached
__lowerCAmelCase = cached_path(
snake_case_ , cache_dir=snake_case_ , force_download=snake_case_ , proxies=snake_case_ , resume_download=snake_case_ , local_files_only=snake_case_ , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
__lowerCAmelCase = Config.load_yaml(snake_case_ )
except EnvironmentError:
__lowerCAmelCase = """Can't load config for"""
raise EnvironmentError(snake_case_ )
if resolved_config_file == config_file:
print("""loading configuration file from path""" )
else:
print("""loading configuration file cache""" )
return Config.load_yaml(snake_case_ ), kwargs
def lowercase (_lowerCAmelCase ):
__lowerCAmelCase = torch.load("""dump.pt""" , map_location=in_tensor.device )
__lowerCAmelCase = in_tensor.numpy()
__lowerCAmelCase = out_tensor.numpy()[0]
print(na.shape , na[0, 0, :5] )
print(na.shape , na[0, 0, :5] )
assert np.allclose(_lowerCAmelCase , _lowerCAmelCase , rtol=0.01 , atol=0.1 ), (
f"""{sum([1 for x in np.isclose(_lowerCAmelCase , _lowerCAmelCase , rtol=0.01 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %"""
" element-wise mismatch"
)
raise Exception("""tensors are all good""" )
# Hugging face functions below
def lowercase (_lowerCAmelCase ):
__lowerCAmelCase = urlparse(_lowerCAmelCase )
return parsed.scheme in ("http", "https")
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=True ):
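    # Model ids without a "/" are legacy checkpoints served as "<endpoint>/<id>-<filename>";
    # namespaced repos use the "<endpoint>/<id>/<filename>" layout instead.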
__lowerCAmelCase = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
__lowerCAmelCase = """/""" not in model_id
if legacy_format:
return f"""{endpoint}/{model_id}-{filename}"""
else:
return f"""{endpoint}/{model_id}/{filename}"""
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=0 , _lowerCAmelCase=None , ):
__lowerCAmelCase = """python/{}""".format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
ua += "; " + "; ".join("""{}/{}""".format(_lowerCAmelCase , _lowerCAmelCase ) for k, v in user_agent.items() )
elif isinstance(_lowerCAmelCase , _lowerCAmelCase ):
ua += "; " + user_agent
__lowerCAmelCase = {"""user-agent""": ua}
if resume_size > 0:
__lowerCAmelCase = """bytes=%d-""" % (resume_size,)
__lowerCAmelCase = requests.get(_lowerCAmelCase , stream=_lowerCAmelCase , proxies=_lowerCAmelCase , headers=_lowerCAmelCase )
if response.status_code == 416: # Range not satisfiable
return
__lowerCAmelCase = response.headers.get("""Content-Length""" )
__lowerCAmelCase = resume_size + int(_lowerCAmelCase ) if content_length is not None else None
__lowerCAmelCase = tqdm(
unit="""B""" , unit_scale=_lowerCAmelCase , total=_lowerCAmelCase , initial=_lowerCAmelCase , desc="""Downloading""" , )
for chunk in response.iter_content(chunk_size=1024 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(_lowerCAmelCase ) )
temp_file.write(_lowerCAmelCase )
progress.close()
def lowercase (_lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=False , _lowerCAmelCase=None , _lowerCAmelCase=10 , _lowerCAmelCase=False , _lowerCAmelCase=None , _lowerCAmelCase=False , ):
if cache_dir is None:
__lowerCAmelCase = TRANSFORMERS_CACHE
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = str(_lowerCAmelCase )
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
__lowerCAmelCase = None
if not local_files_only:
try:
__lowerCAmelCase = requests.head(_lowerCAmelCase , allow_redirects=_lowerCAmelCase , proxies=_lowerCAmelCase , timeout=_lowerCAmelCase )
if response.status_code == 200:
__lowerCAmelCase = response.headers.get("""ETag""" )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
__lowerCAmelCase = url_to_filename(_lowerCAmelCase , _lowerCAmelCase )
# get cache path to put the file
__lowerCAmelCase = os.path.join(_lowerCAmelCase , _lowerCAmelCase )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(_lowerCAmelCase ):
return cache_path
else:
__lowerCAmelCase = [
file
for file in fnmatch.filter(os.listdir(_lowerCAmelCase ) , filename + """.*""" )
if not file.endswith(""".json""" ) and not file.endswith(""".lock""" )
]
if len(_lowerCAmelCase ) > 0:
return os.path.join(_lowerCAmelCase , matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
"""Cannot find the requested files in the cached path and outgoing traffic has been"""
""" disabled. To enable model look-ups and downloads online, set 'local_files_only'"""
""" to False.""" )
return None
# From now on, etag is not None.
if os.path.exists(_lowerCAmelCase ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
__lowerCAmelCase = cache_path + """.lock"""
with FileLock(_lowerCAmelCase ):
# If the download just completed while the lock was activated.
if os.path.exists(_lowerCAmelCase ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
__lowerCAmelCase = cache_path + """.incomplete"""
@contextmanager
def _resumable_file_manager():
with open(_lowerCAmelCase , """a+b""" ) as f:
yield f
__lowerCAmelCase = _resumable_file_manager
if os.path.exists(_lowerCAmelCase ):
__lowerCAmelCase = os.stat(_lowerCAmelCase ).st_size
else:
__lowerCAmelCase = 0
else:
__lowerCAmelCase = partial(tempfile.NamedTemporaryFile , dir=_lowerCAmelCase , delete=_lowerCAmelCase )
__lowerCAmelCase = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
"""%s not found in cache or force_download set to True, downloading to %s""" , _lowerCAmelCase , temp_file.name , )
http_get(
_lowerCAmelCase , _lowerCAmelCase , proxies=_lowerCAmelCase , resume_size=_lowerCAmelCase , user_agent=_lowerCAmelCase , )
os.replace(temp_file.name , _lowerCAmelCase )
__lowerCAmelCase = {"""url""": url, """etag""": etag}
__lowerCAmelCase = cache_path + """.json"""
with open(_lowerCAmelCase , """w""" ) as meta_file:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
return cache_path
def lowercase (_lowerCAmelCase , _lowerCAmelCase=None ):
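    # Build a deterministic cache filename by hashing the URL, and append a hash of the ETag when
    # one is available so different revisions of the same resource get separate cache entries.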
__lowerCAmelCase = url.encode("""utf-8""" )
__lowerCAmelCase = shaaaa(_lowerCAmelCase )
__lowerCAmelCase = url_hash.hexdigest()
if etag:
__lowerCAmelCase = etag.encode("""utf-8""" )
__lowerCAmelCase = shaaaa(_lowerCAmelCase )
filename += "." + etag_hash.hexdigest()
if url.endswith(""".h5""" ):
filename += ".h5"
return filename
def lowercase (_lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=False , _lowerCAmelCase=None , _lowerCAmelCase=False , _lowerCAmelCase=None , _lowerCAmelCase=False , _lowerCAmelCase=False , _lowerCAmelCase=False , ):
if cache_dir is None:
__lowerCAmelCase = TRANSFORMERS_CACHE
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = str(_lowerCAmelCase )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = str(_lowerCAmelCase )
if is_remote_url(_lowerCAmelCase ):
# URL, so get it from the cache (downloading if necessary)
__lowerCAmelCase = get_from_cache(
_lowerCAmelCase , cache_dir=_lowerCAmelCase , force_download=_lowerCAmelCase , proxies=_lowerCAmelCase , resume_download=_lowerCAmelCase , user_agent=_lowerCAmelCase , local_files_only=_lowerCAmelCase , )
elif os.path.exists(_lowerCAmelCase ):
# File, and it exists.
__lowerCAmelCase = url_or_filename
elif urlparse(_lowerCAmelCase ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError("""file {} not found""".format(_lowerCAmelCase ) )
else:
# Something unknown
raise ValueError("""unable to parse {} as a URL or as a local path""".format(_lowerCAmelCase ) )
if extract_compressed_file:
if not is_zipfile(_lowerCAmelCase ) and not tarfile.is_tarfile(_lowerCAmelCase ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
__lowerCAmelCase , __lowerCAmelCase = os.path.split(_lowerCAmelCase )
__lowerCAmelCase = output_file.replace(""".""" , """-""" ) + """-extracted"""
__lowerCAmelCase = os.path.join(_lowerCAmelCase , _lowerCAmelCase )
if os.path.isdir(_lowerCAmelCase ) and os.listdir(_lowerCAmelCase ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
__lowerCAmelCase = output_path + """.lock"""
with FileLock(_lowerCAmelCase ):
shutil.rmtree(_lowerCAmelCase , ignore_errors=_lowerCAmelCase )
os.makedirs(_lowerCAmelCase )
if is_zipfile(_lowerCAmelCase ):
with ZipFile(_lowerCAmelCase , """r""" ) as zip_file:
zip_file.extractall(_lowerCAmelCase )
zip_file.close()
elif tarfile.is_tarfile(_lowerCAmelCase ):
__lowerCAmelCase = tarfile.open(_lowerCAmelCase )
tar_file.extractall(_lowerCAmelCase )
tar_file.close()
else:
raise EnvironmentError("""Archive format of {} could not be identified""".format(_lowerCAmelCase ) )
return output_path_extracted
return output_path
def lowercase (_lowerCAmelCase , _lowerCAmelCase="," ):
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
if os.path.isfile(_lowerCAmelCase ):
with open(_lowerCAmelCase ) as f:
__lowerCAmelCase = eval(f.read() )
else:
__lowerCAmelCase = requests.get(_lowerCAmelCase )
try:
            __lowerCAmelCase = req.json()  # parse the response object; the requests module itself has no json()
except Exception:
__lowerCAmelCase = req.content.decode()
assert data is not None, "could not connect"
try:
__lowerCAmelCase = eval(_lowerCAmelCase )
except Exception:
__lowerCAmelCase = data.split("""\n""" )
req.close()
return data
def lowercase (_lowerCAmelCase ):
__lowerCAmelCase = requests.get(_lowerCAmelCase )
__lowerCAmelCase = np.array(Image.open(BytesIO(response.content ) ) )
return img
def lowercase (_lowerCAmelCase ):
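    # Download the pickled checkpoint if it is not already present, convert its numpy weights to
    # torch tensors, and add a zero "num_batches_tracked" entry next to every "running_var" key
    # (the extra buffer that torch BatchNorm layers keep in their state dict).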
__lowerCAmelCase = url.split("""/""" )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(_lowerCAmelCase )
with open(_lowerCAmelCase , """rb""" ) as stream:
__lowerCAmelCase = pkl.load(_lowerCAmelCase )
__lowerCAmelCase = weights.pop("""model""" )
__lowerCAmelCase = {}
for k, v in model.items():
__lowerCAmelCase = torch.from_numpy(_lowerCAmelCase )
if "running_var" in k:
__lowerCAmelCase = torch.tensor([0] )
__lowerCAmelCase = k.replace("""running_var""" , """num_batches_tracked""" )
__lowerCAmelCase = zero
return new
def lowercase ():
print(f"""{os.path.abspath(os.path.join(_lowerCAmelCase , os.pardir ) )}/demo.ipynb""" )
def lowercase (_lowerCAmelCase , _lowerCAmelCase="RGB" ):
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
if os.path.isfile(_lowerCAmelCase ):
__lowerCAmelCase = cva.imread(_lowerCAmelCase )
else:
__lowerCAmelCase = get_image_from_url(_lowerCAmelCase )
assert img is not None, f"""could not connect to: {im}"""
__lowerCAmelCase = cva.cvtColor(_lowerCAmelCase , cva.COLOR_BGR2RGB )
if input_format == "RGB":
__lowerCAmelCase = img[:, :, ::-1]
return img
def lowercase (_lowerCAmelCase , _lowerCAmelCase=1 ):
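    # Yield successive slices of the image list, `batch` images at a time.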
return (images[i : i + batch] for i in range(0 , len(_lowerCAmelCase ) , _lowerCAmelCase ))
| 301
|
"""simple docstring"""
from __future__ import annotations
from statistics import mean
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
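    # Shortest-job-first scheduling: among the processes that have already arrived, the one with
    # the smallest remaining burst time is selected; in this implementation it runs to completion.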
__lowerCAmelCase = [0] * no_of_processes
__lowerCAmelCase = [0] * no_of_processes
    # Initialize remaining_time to burst_time.
for i in range(_lowerCAmelCase ):
__lowerCAmelCase = burst_time[i]
__lowerCAmelCase = []
__lowerCAmelCase = 0
__lowerCAmelCase = 0
# When processes are not completed,
    # A process whose arrival time has passed
    # and has remaining execution time is put into the ready_process.
# The shortest process in the ready_process, target_process is executed.
while completed != no_of_processes:
__lowerCAmelCase = []
__lowerCAmelCase = -1
for i in range(_lowerCAmelCase ):
if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
ready_process.append(_lowerCAmelCase )
if len(_lowerCAmelCase ) > 0:
__lowerCAmelCase = ready_process[0]
for i in ready_process:
if remaining_time[i] < remaining_time[target_process]:
__lowerCAmelCase = i
total_time += burst_time[target_process]
completed += 1
__lowerCAmelCase = 0
__lowerCAmelCase = (
total_time - arrival_time[target_process] - burst_time[target_process]
)
else:
total_time += 1
return waiting_time
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
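    # Turnaround time for each process is its waiting time plus its burst time.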
__lowerCAmelCase = [0] * no_of_processes
for i in range(_lowerCAmelCase ):
__lowerCAmelCase = burst_time[i] + waiting_time[i]
return turn_around_time
if __name__ == "__main__":
print('''[TEST CASE 01]''')
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = [2, 5, 3, 7]
SCREAMING_SNAKE_CASE_ = [0, 0, 0, 0]
SCREAMING_SNAKE_CASE_ = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
SCREAMING_SNAKE_CASE_ = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print('''PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time''')
for i, process_id in enumerate(list(range(1, 5))):
print(
F"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"
F"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"
)
print(F"\nAverage waiting time = {mean(waiting_time):.5f}")
print(F"Average turnaround time = {mean(turn_around_time):.5f}")
| 301
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE_ = {
'''configuration_longformer''': [
'''LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''LongformerConfig''',
'''LongformerOnnxConfig''',
],
'''tokenization_longformer''': ['''LongformerTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = ['''LongformerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
'''LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongformerForMaskedLM''',
'''LongformerForMultipleChoice''',
'''LongformerForQuestionAnswering''',
'''LongformerForSequenceClassification''',
'''LongformerForTokenClassification''',
'''LongformerModel''',
'''LongformerPreTrainedModel''',
'''LongformerSelfAttention''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
'''TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLongformerForMaskedLM''',
'''TFLongformerForMultipleChoice''',
'''TFLongformerForQuestionAnswering''',
'''TFLongformerForSequenceClassification''',
'''TFLongformerForTokenClassification''',
'''TFLongformerModel''',
'''TFLongformerPreTrainedModel''',
'''TFLongformerSelfAttention''',
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 301
|
"""simple docstring"""
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE_ = get_tests_dir('''fixtures/spiece.model''')
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( A__ , unittest.TestCase ):
'''simple docstring'''
_snake_case = DebertaVaTokenizer
_snake_case = DebertaVaTokenizerFast
_snake_case = True
_snake_case = True
def A__ ( self ) -> Optional[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , unk_token="""<unk>""" )
tokenizer.save_pretrained(self.tmpdirname )
def A__ ( self , snake_case_ ) -> List[Any]:
__lowerCAmelCase = """this is a test"""
__lowerCAmelCase = """this is a test"""
return input_text, output_text
def A__ ( self ) -> Optional[Any]:
__lowerCAmelCase = """<pad>"""
__lowerCAmelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ )
def A__ ( self ) -> Any:
__lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """[PAD]""" )
self.assertEqual(len(snake_case_ ) , 30_001 )
def A__ ( self ) -> Optional[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 30_000 )
def A__ ( self ) -> int:
# fmt: off
__lowerCAmelCase = """ \tHeLLo!how \n Are yoU? """
__lowerCAmelCase = ["""▁hello""", """!""", """how""", """▁are""", """▁you""", """?"""]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , do_lower_case=snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = DebertaVaTokenizerFast(snake_case_ , do_lower_case=snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
@unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" )
def A__ ( self ) -> int:
pass
@unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" )
def A__ ( self ) -> Dict:
pass
def A__ ( self ) -> List[str]:
# fmt: off
__lowerCAmelCase = """I was born in 92000, and this is falsé."""
__lowerCAmelCase = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = DebertaVaTokenizerFast(snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
def A__ ( self ) -> Dict:
# fmt: off
__lowerCAmelCase = """I was born in 92000, and this is falsé."""
__lowerCAmelCase = ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = DebertaVaTokenizerFast(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
def A__ ( self ) -> Any:
# fmt: off
__lowerCAmelCase = """I was born in 92000, and this is falsé."""
__lowerCAmelCase = ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """.""", ]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = DebertaVaTokenizerFast(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
def A__ ( self ) -> Tuple:
# fmt: off
__lowerCAmelCase = """I was born in 92000, and this is falsé."""
__lowerCAmelCase = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = DebertaVaTokenizerFast(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
def A__ ( self ) -> Any:
# fmt: off
__lowerCAmelCase = """ \tHeLLo!how \n Are yoU? """
__lowerCAmelCase = ["""▁""", """<unk>""", """e""", """<unk>""", """o""", """!""", """how""", """▁""", """<unk>""", """re""", """▁yo""", """<unk>""", """?"""]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = DebertaVaTokenizerFast(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
def A__ ( self ) -> int:
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = self.get_rust_tokenizer()
__lowerCAmelCase = """I was born in 92000, and this is falsé."""
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
__lowerCAmelCase = rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = self.get_rust_tokenizer()
__lowerCAmelCase = tokenizer.encode(snake_case_ )
__lowerCAmelCase = rust_tokenizer.encode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
def A__ ( self ) -> str:
__lowerCAmelCase = """This is a test"""
__lowerCAmelCase = [13, 1, 4_398, 25, 21, 1_289]
__lowerCAmelCase = ["""▁""", """T""", """his""", """▁is""", """▁a""", """▁test"""]
__lowerCAmelCase = ["""▁""", """<unk>""", """his""", """▁is""", """▁a""", """▁test"""]
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , keep_accents=snake_case_ )
__lowerCAmelCase = DebertaVaTokenizerFast(snake_case_ , keep_accents=snake_case_ )
__lowerCAmelCase = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = rust_tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
# fmt: off
__lowerCAmelCase = """I was born in 92000, and this is falsé."""
__lowerCAmelCase = [13, 1, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9]
__lowerCAmelCase = ["""▁""", """I""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """.""", ]
__lowerCAmelCase = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """.""", ]
# fmt: on
__lowerCAmelCase = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = rust_tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
def A__ ( self ) -> Optional[int]:
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ )
__lowerCAmelCase = tokenizer.encode("""sequence builders""" )
__lowerCAmelCase = tokenizer.encode("""multi-sequence build""" )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(snake_case_ )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(snake_case_ , snake_case_ )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , snake_case_ )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , snake_case_ , )
@slow
def A__ ( self ) -> int:
# fmt: off
__lowerCAmelCase = {"""input_ids""": [[1, 39_867, 36, 19_390, 486, 27, 35_052, 81_436, 18, 60_685, 1_225, 7, 35_052, 81_436, 18, 9_367, 16_899, 18, 15_937, 53, 594, 773, 18, 16_287, 30_465, 36, 15_937, 6, 41_139, 38, 36_979, 60_763, 191, 6, 34_132, 99, 6, 50_538, 390, 43_230, 6, 34_132, 2_779, 20_850, 14, 699, 1_072, 1_194, 36, 382, 10_901, 53, 7, 699, 1_072, 2_084, 36, 20_422, 630, 53, 19, 105, 3_049, 1_896, 1_053, 16_899, 1_506, 11, 37_978, 4_243, 7, 1_237, 31_869, 200, 16_566, 654, 6, 35_052, 81_436, 7, 55_630, 13_593, 4, 2], [1, 26, 15_011, 13, 667, 8, 1_053, 18, 23_611, 1_237, 72_356, 12_820, 34, 104_134, 1_209, 35, 13_313, 6_627, 21, 202, 347, 7, 164, 2_399, 11, 46, 4_485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_232, 2_864, 15_785, 14_951, 105, 5, 8_581, 1_250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case_ , model_name="""microsoft/deberta-v2-xlarge""" , revision="""ad6e42c1532ddf3a15c39246b63f5559d558b670""" , )
| 301
| 1
|
"""simple docstring"""
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
SCREAMING_SNAKE_CASE_ = logging.getLogger(__name__)
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self ) -> Union[str, Any]:
__lowerCAmelCase = False
def A__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> int:
if not self.initialized:
__lowerCAmelCase = RagRetriever(
snake_case_ , question_encoder_tokenizer=snake_case_ , generator_tokenizer=snake_case_ , index=snake_case_ , init_retrieval=snake_case_ , )
__lowerCAmelCase = True
def A__ ( self ) -> Optional[int]:
self.retriever.index.init_index()
def A__ ( self , snake_case_ , snake_case_ ) -> str:
__lowerCAmelCase , __lowerCAmelCase = self.retriever._main_retrieve(snake_case_ , snake_case_ )
return doc_ids, retrieved_doc_embeds
class lowerCAmelCase_ ( A__ ):
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_=None ) -> List[Any]:
if index is not None and index.is_initialized() and len(snake_case_ ) > 0:
raise ValueError(
"""When using Ray for distributed fine-tuning, """
"""you'll need to provide the paths instead, """
"""as the dataset and the index are loaded """
"""separately. More info in examples/rag/use_own_knowledge_dataset.py """ )
super().__init__(
snake_case_ , question_encoder_tokenizer=snake_case_ , generator_tokenizer=snake_case_ , index=snake_case_ , init_retrieval=snake_case_ , )
__lowerCAmelCase = retrieval_workers
if len(self.retrieval_workers ) > 0:
ray.get(
[
worker.create_rag_retriever.remote(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
for worker in self.retrieval_workers
] )
def A__ ( self ) -> Tuple:
logger.info("""initializing retrieval""" )
if len(self.retrieval_workers ) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
def A__ ( self , snake_case_ , snake_case_ ) -> int:
if len(self.retrieval_workers ) > 0:
# Select a random retrieval actor.
__lowerCAmelCase = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )]
__lowerCAmelCase , __lowerCAmelCase = ray.get(random_worker.retrieve.remote(snake_case_ , snake_case_ ) )
else:
__lowerCAmelCase , __lowerCAmelCase = self._main_retrieve(snake_case_ , snake_case_ )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(snake_case_ )
@classmethod
def A__ ( cls , snake_case_ , snake_case_=None , **snake_case_ ) -> str:
return super(snake_case_ , cls ).get_tokenizers(snake_case_ , snake_case_ , **snake_case_ )
@classmethod
def A__ ( cls , snake_case_ , snake_case_ , snake_case_=None , **snake_case_ ) -> str:
__lowerCAmelCase = kwargs.pop("""config""" , snake_case_ ) or RagConfig.from_pretrained(snake_case_ , **snake_case_ )
__lowerCAmelCase = RagTokenizer.from_pretrained(snake_case_ , config=snake_case_ )
__lowerCAmelCase = rag_tokenizer.question_encoder
__lowerCAmelCase = rag_tokenizer.generator
if indexed_dataset is not None:
__lowerCAmelCase = """custom"""
__lowerCAmelCase = CustomHFIndex(config.retrieval_vector_size , snake_case_ )
else:
__lowerCAmelCase = cls._build_index(snake_case_ )
return cls(
snake_case_ , question_encoder_tokenizer=snake_case_ , generator_tokenizer=snake_case_ , retrieval_workers=snake_case_ , index=snake_case_ , )
"""simple docstring"""
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
SCREAMING_SNAKE_CASE_ = open # noqa: we just need to have a builtin inside this module to test it properly
"""simple docstring"""
from __future__ import annotations
sieve = [True] * 1_000_001
i = 2
while i * i <= 1_000_000:
    if sieve[i]:
        for j in range(i * i, 1_000_001, i):
            sieve[j] = False
    i += 1
def is_prime(n: int) -> bool:
    return sieve[n]
def contains_an_even_digit(n: int) -> bool:
    return any(digit in "02468" for digit in str(n))
def find_circular_primes(limit: int = 1_000_000) -> list[int]:
    result = [2]  # result already includes the number 2.
    for num in range(3, limit + 1, 2):
        if is_prime(num) and not contains_an_even_digit(num):
            str_num = str(num)
            list_nums = [int(str_num[j:] + str_num[:j]) for j in range(len(str_num))]
            if all(is_prime(i) for i in list_nums):
                result.append(num)
    return result
def solution() -> int:
    return len(find_circular_primes())
if __name__ == "__main__":
    print(f"{len(find_circular_primes()) = }")
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
SCREAMING_SNAKE_CASE_ = {
'''vocab_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-openqa''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt'''
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-openqa''': (
'''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-reader''': (
'''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-openqa''': (
'''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-reader''': (
'''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'''
),
},
}
SCREAMING_SNAKE_CASE_ = {
'''google/realm-cc-news-pretrained-embedder''': 512,
'''google/realm-cc-news-pretrained-encoder''': 512,
'''google/realm-cc-news-pretrained-scorer''': 512,
'''google/realm-cc-news-pretrained-openqa''': 512,
'''google/realm-orqa-nq-openqa''': 512,
'''google/realm-orqa-nq-reader''': 512,
'''google/realm-orqa-wq-openqa''': 512,
'''google/realm-orqa-wq-reader''': 512,
}
SCREAMING_SNAKE_CASE_ = {
'''google/realm-cc-news-pretrained-embedder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-encoder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-scorer''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-reader''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-reader''': {'''do_lower_case''': True},
}
class lowerCAmelCase_ ( A__ ):
'''simple docstring'''
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_INIT_CONFIGURATION
_snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case = RealmTokenizer
def __init__( self , snake_case_=None , snake_case_=None , snake_case_=True , snake_case_="[UNK]" , snake_case_="[SEP]" , snake_case_="[PAD]" , snake_case_="[CLS]" , snake_case_="[MASK]" , snake_case_=True , snake_case_=None , **snake_case_ , ) -> Optional[int]:
super().__init__(
snake_case_ , tokenizer_file=snake_case_ , do_lower_case=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , pad_token=snake_case_ , cls_token=snake_case_ , mask_token=snake_case_ , tokenize_chinese_chars=snake_case_ , strip_accents=snake_case_ , **snake_case_ , )
__lowerCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , snake_case_ ) != do_lower_case
or normalizer_state.get("""strip_accents""" , snake_case_ ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , snake_case_ ) != tokenize_chinese_chars
):
__lowerCAmelCase = getattr(snake_case_ , normalizer_state.pop("""type""" ) )
__lowerCAmelCase = do_lower_case
__lowerCAmelCase = strip_accents
__lowerCAmelCase = tokenize_chinese_chars
__lowerCAmelCase = normalizer_class(**snake_case_ )
__lowerCAmelCase = do_lower_case
def A__ ( self , snake_case_ , **snake_case_ ) -> Tuple:
__lowerCAmelCase = PaddingStrategy.MAX_LENGTH
__lowerCAmelCase = text
__lowerCAmelCase = kwargs.pop("""text_pair""" , snake_case_ )
__lowerCAmelCase = kwargs.pop("""return_tensors""" , snake_case_ )
__lowerCAmelCase = {
"""input_ids""": [],
"""attention_mask""": [],
"""token_type_ids""": [],
}
for idx, candidate_text in enumerate(snake_case_ ):
if batch_text_pair is not None:
__lowerCAmelCase = batch_text_pair[idx]
else:
__lowerCAmelCase = None
__lowerCAmelCase = super().__call__(snake_case_ , snake_case_ , return_tensors=snake_case_ , **snake_case_ )
__lowerCAmelCase = encoded_candidates.get("""input_ids""" )
__lowerCAmelCase = encoded_candidates.get("""attention_mask""" )
__lowerCAmelCase = encoded_candidates.get("""token_type_ids""" )
if encoded_input_ids is not None:
output_data["input_ids"].append(snake_case_ )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(snake_case_ )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(snake_case_ )
__lowerCAmelCase = {key: item for key, item in output_data.items() if len(snake_case_ ) != 0}
return BatchEncoding(snake_case_ , tensor_type=snake_case_ )
def A__ ( self , snake_case_ , snake_case_=None ) -> Optional[int]:
__lowerCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def A__ ( self , snake_case_ , snake_case_ = None ) -> List[int]:
__lowerCAmelCase = [self.sep_token_id]
__lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def A__ ( self , snake_case_ , snake_case_ = None ) -> Tuple[str]:
__lowerCAmelCase = self._tokenizer.model.save(snake_case_ , name=snake_case_ )
return tuple(snake_case_ )
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
def lowercase (_lowerCAmelCase ):
__lowerCAmelCase = torch.load(_lowerCAmelCase , map_location="""cpu""" )
if "model" in sd.keys():
__lowerCAmelCase = torch.load(_lowerCAmelCase , map_location="""cpu""" )["""model"""]
# pop unnecessary weights
__lowerCAmelCase = [
"""decoder.version""",
"""decoder.output_projection.weight""",
]
for key in keys_to_delete:
if key in sd:
sd.pop(_lowerCAmelCase )
__lowerCAmelCase = {
"""decoder.project_in_dim.weight""": """decoder.project_in.weight""",
"""decoder.project_out_dim.weight""": """decoder.project_out.weight""",
"""decoder.layer_norm.weight""": """decoder.final_layer_norm.weight""",
"""decoder.layer_norm.bias""": """decoder.final_layer_norm.bias""",
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
__lowerCAmelCase = sd.pop(_lowerCAmelCase )
__lowerCAmelCase = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
__lowerCAmelCase = sd[key]
# We split QKV in separate Q,K,V
__lowerCAmelCase = key.replace(""".qkv_proj.""" , """.q_proj.""" )
__lowerCAmelCase = key.replace(""".qkv_proj.""" , """.k_proj.""" )
__lowerCAmelCase = key.replace(""".qkv_proj.""" , """.v_proj.""" )
__lowerCAmelCase = value.shape[0]
assert depth % 3 == 0
# `SequeuceParallelTransformerBlock` has its QKV weight separated in K, V, Q order despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = torch.split(_lowerCAmelCase , depth // 3 , dim=0 )
__lowerCAmelCase = q
__lowerCAmelCase = k
__lowerCAmelCase = v
del sd[key]
return sd
@torch.no_grad()
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None ):
__lowerCAmelCase = load_checkpoint(_lowerCAmelCase )
if config is not None:
__lowerCAmelCase = OPTConfig.from_pretrained(_lowerCAmelCase )
else:
__lowerCAmelCase = OPTConfig()
__lowerCAmelCase = OPTModel(_lowerCAmelCase ).half().eval()
model.load_state_dict(_lowerCAmelCase )
# Check results
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fairseq_path''',
type=str,
help=(
'''path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'''
''' https://huggingface.co/models?other=opt_metasq'''
),
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--hf_config''', default=None, type=str, help='''Define HF config.''')
SCREAMING_SNAKE_CASE_ = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
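# Example invocation (the script filename and checkpoint paths below are placeholders, not
# part of this module):
#   python convert_opt_checkpoint.py --fairseq_path ./opt/restored.pt \
#       --pytorch_dump_folder_path ./opt-hf --hf_config facebook/opt-350m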
"""simple docstring"""
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(ratio: float = 0.1) -> int:
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
if __name__ == "__main__":
    import doctest
    doctest.testmod()
"""simple docstring"""
import sys
def matrix_chain_order(array):
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]
    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol
def print_optimal_solution(optimal_solution, i, j):
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")
def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrices created from the above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)
    print("No. of operations required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)
if __name__ == "__main__":
    main()
"""simple docstring"""
import os
from distutils.util import strtobool
def get_int_from_env(env_keys, default):
    """Returns the first positive env value found in the `env_keys` list or the default."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default
def parse_flag_from_env(key, default=False):
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...
def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
    return value
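# Usage sketch (the environment variable names here are illustrative, not required by this
# module):
#   num_cpu_threads = get_int_from_env(["OMP_NUM_THREADS", "MKL_NUM_THREADS"], 1)
#   debug_mode = parse_flag_from_env("ACCELERATE_DEBUG_MODE", default=False)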
"""simple docstring"""
def solution(limit: int = 1_000_000) -> int:
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])
if __name__ == "__main__":
    print(solution())
"""simple docstring"""
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
SCREAMING_SNAKE_CASE_ = '''bert-base-cased'''
SCREAMING_SNAKE_CASE_ = '''google/pegasus-xsum'''
SCREAMING_SNAKE_CASE_ = [''' Sam ate lunch today.''', '''Sams lunch ingredients.''']
SCREAMING_SNAKE_CASE_ = ['''A very interesting story about what I ate for lunch.''', '''Avocado, celery, turkey, coffee''']
SCREAMING_SNAKE_CASE_ = '''patrickvonplaten/t5-tiny-random'''
SCREAMING_SNAKE_CASE_ = '''sshleifer/bart-tiny-random'''
SCREAMING_SNAKE_CASE_ = '''sshleifer/tiny-mbart'''
SCREAMING_SNAKE_CASE_ = '''sshleifer/tiny-marian-en-de'''
def lowercase (_lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = """\n""".join(_lowerCAmelCase )
Path(_lowerCAmelCase ).open("""w""" ).writelines(_lowerCAmelCase )
def lowercase (_lowerCAmelCase ):
for split in ["train", "val", "test"]:
_dump_articles(os.path.join(_lowerCAmelCase , f"""{split}.source""" ) , _lowerCAmelCase )
_dump_articles(os.path.join(_lowerCAmelCase , f"""{split}.target""" ) , _lowerCAmelCase )
return tmp_dir
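# The two helpers above build a throwaway dataset directory: `_dump_articles` writes the
# given lines to a file, and the second helper materialises train/val/test `.source` and
# `.target` files from the tiny ARTICLES/SUMMARIES fixtures defined at module level.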
class lowerCAmelCase_ ( A__ ):
'''simple docstring'''
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def A__ ( self , snake_case_ ) -> Dict:
__lowerCAmelCase = AutoTokenizer.from_pretrained(snake_case_ )
__lowerCAmelCase = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
__lowerCAmelCase = max(len(tokenizer.encode(snake_case_ ) ) for a in ARTICLES )
__lowerCAmelCase = max(len(tokenizer.encode(snake_case_ ) ) for a in SUMMARIES )
__lowerCAmelCase = 4
__lowerCAmelCase = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
__lowerCAmelCase , __lowerCAmelCase = """ro_RO""", """de_DE""" # ignored for all but mbart, but never causes error.
__lowerCAmelCase = SeqaSeqDataset(
snake_case_ , data_dir=snake_case_ , type_path="""train""" , max_source_length=snake_case_ , max_target_length=snake_case_ , src_lang=snake_case_ , tgt_lang=snake_case_ , )
__lowerCAmelCase = DataLoader(snake_case_ , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(snake_case_ , snake_case_ )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
__lowerCAmelCase = shift_tokens_right(batch["""labels"""] , tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def A__ ( self , snake_case_ ) -> List[Any]:
__lowerCAmelCase = AutoTokenizer.from_pretrained(snake_case_ )
__lowerCAmelCase = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
__lowerCAmelCase = max(len(tokenizer.encode(snake_case_ ) ) for a in ARTICLES )
__lowerCAmelCase = max(len(tokenizer.encode(snake_case_ ) ) for a in SUMMARIES )
__lowerCAmelCase = 4
__lowerCAmelCase = LegacySeqaSeqDataset(
snake_case_ , data_dir=snake_case_ , type_path="""train""" , max_source_length=20 , max_target_length=snake_case_ , )
__lowerCAmelCase = DataLoader(snake_case_ , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def A__ ( self ) -> str:
__lowerCAmelCase = AutoTokenizer.from_pretrained("""facebook/mbart-large-cc25""" )
__lowerCAmelCase = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
__lowerCAmelCase = tmp_dir.joinpath("""train.source""" ).open().readlines()
__lowerCAmelCase = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(snake_case_ , snake_case_ , 128 , snake_case_ )
__lowerCAmelCase = {x.name for x in tmp_dir.iterdir()}
__lowerCAmelCase = {x.name for x in save_dir.iterdir()}
__lowerCAmelCase = save_dir.joinpath("""train.source""" ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(snake_case_ ) < len(snake_case_ )
assert len(snake_case_ ) == 1
assert len(packed_examples[0] ) == sum(len(snake_case_ ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason="""This test requires fairseq""" )
def A__ ( self ) -> Optional[int]:
if not FAIRSEQ_AVAILABLE:
return
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self._get_dataset(max_len=64 )
__lowerCAmelCase = 64
__lowerCAmelCase = ds.make_dynamic_sampler(snake_case_ , required_batch_size_multiple=snake_case_ )
__lowerCAmelCase = [len(snake_case_ ) for x in batch_sampler]
assert len(set(snake_case_ ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(snake_case_ ) == len(snake_case_ ) # no dropped or added examples
__lowerCAmelCase = DataLoader(snake_case_ , batch_sampler=snake_case_ , collate_fn=ds.collate_fn , num_workers=2 )
__lowerCAmelCase = []
__lowerCAmelCase = []
for batch in data_loader:
__lowerCAmelCase = batch["""input_ids"""].shape
__lowerCAmelCase = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
__lowerCAmelCase = np.product(batch["""input_ids"""].shape )
num_src_per_batch.append(snake_case_ )
if num_src_tokens > (max_tokens * 1.1):
failures.append(snake_case_ )
assert num_src_per_batch[0] == max(snake_case_ )
if failures:
raise AssertionError(f"""too many tokens in {len(snake_case_ )} batches""" )
def A__ ( self ) -> Tuple:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self._get_dataset(max_len=512 )
__lowerCAmelCase = 2
__lowerCAmelCase = ds.make_sortish_sampler(snake_case_ , shuffle=snake_case_ )
__lowerCAmelCase = DataLoader(snake_case_ , batch_size=snake_case_ , collate_fn=ds.collate_fn , num_workers=2 )
__lowerCAmelCase = DataLoader(snake_case_ , batch_size=snake_case_ , collate_fn=ds.collate_fn , num_workers=2 , sampler=snake_case_ )
__lowerCAmelCase = tokenizer.pad_token_id
def count_pad_tokens(snake_case_ , snake_case_="input_ids" ):
return [batch[k].eq(snake_case_ ).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(snake_case_ , k="""labels""" ) ) < sum(count_pad_tokens(snake_case_ , k="""labels""" ) )
assert sum(count_pad_tokens(snake_case_ ) ) < sum(count_pad_tokens(snake_case_ ) )
assert len(snake_case_ ) == len(snake_case_ )
def A__ ( self , snake_case_=1_000 , snake_case_=128 ) -> Optional[int]:
if os.getenv("""USE_REAL_DATA""" , snake_case_ ):
__lowerCAmelCase = """examples/seq2seq/wmt_en_ro"""
__lowerCAmelCase = max_len * 2 * 64
if not Path(snake_case_ ).joinpath("""train.len""" ).exists():
save_len_file(snake_case_ , snake_case_ )
else:
__lowerCAmelCase = """examples/seq2seq/test_data/wmt_en_ro"""
__lowerCAmelCase = max_len * 4
save_len_file(snake_case_ , snake_case_ )
__lowerCAmelCase = AutoTokenizer.from_pretrained(snake_case_ )
__lowerCAmelCase = SeqaSeqDataset(
snake_case_ , data_dir=snake_case_ , type_path="""train""" , max_source_length=snake_case_ , max_target_length=snake_case_ , n_obs=snake_case_ , )
return ds, max_tokens, tokenizer
def A__ ( self ) -> Any:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self._get_dataset()
__lowerCAmelCase = set(DistributedSortishSampler(snake_case_ , 256 , num_replicas=2 , rank=0 , add_extra_examples=snake_case_ ) )
__lowerCAmelCase = set(DistributedSortishSampler(snake_case_ , 256 , num_replicas=2 , rank=1 , add_extra_examples=snake_case_ ) )
assert idsa.intersection(snake_case_ ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
def A__ ( self , snake_case_ ) -> Union[str, Any]:
__lowerCAmelCase = AutoTokenizer.from_pretrained(snake_case_ , use_fast=snake_case_ )
if tok_name == MBART_TINY:
__lowerCAmelCase = SeqaSeqDataset(
snake_case_ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="""train""" , max_source_length=4 , max_target_length=8 , src_lang="""EN""" , tgt_lang="""FR""" , )
__lowerCAmelCase = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
__lowerCAmelCase = SeqaSeqDataset(
snake_case_ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="""train""" , max_source_length=4 , max_target_length=8 , )
__lowerCAmelCase = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(snake_case_ ) == 1 if tok_name == BART_TINY else len(snake_case_ ) == 0
"""simple docstring"""
from math import isqrt, log2
def calculate_prime_numbers(max_number: int) -> list[int]:
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]
def solution(base: int = 800_800, degree: int = 800_800) -> int:
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)
    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1
    return hybrid_integers_count
if __name__ == "__main__":
    print(f"{solution() = }")
"""simple docstring"""
def solution(n: int = 4_000_000) -> int:
    """Sum of the even Fibonacci numbers not exceeding `n`, e.g. solution(10) == 10 (2 + 8)."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
if __name__ == "__main__":
    print(f"{solution() = }")
"""simple docstring"""
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
SCREAMING_SNAKE_CASE_ = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_=16 , snake_case_=13 , snake_case_=7 , snake_case_=14 , snake_case_=10 , snake_case_=19 , snake_case_=5 , snake_case_=4 , snake_case_=True , snake_case_=16 , snake_case_=2 , snake_case_=4 , snake_case_=4 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=[1, 2, 3, 4, 5] , snake_case_=25 , snake_case_=5 , ) -> Tuple:
__lowerCAmelCase = d_model
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = prediction_length
__lowerCAmelCase = context_length
__lowerCAmelCase = cardinality
__lowerCAmelCase = num_time_features
__lowerCAmelCase = lags_sequence
__lowerCAmelCase = embedding_dimension
__lowerCAmelCase = is_training
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = context_length
__lowerCAmelCase = prediction_length + label_length
__lowerCAmelCase = label_length
__lowerCAmelCase = moving_average
__lowerCAmelCase = autocorrelation_factor
def A__ ( self ) -> List[Any]:
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def A__ ( self , snake_case_ ) -> Any:
__lowerCAmelCase = config.context_length + max(config.lags_sequence )
__lowerCAmelCase = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
__lowerCAmelCase = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
__lowerCAmelCase = floats_tensor([self.batch_size, _past_length] )
__lowerCAmelCase = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
__lowerCAmelCase = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
__lowerCAmelCase = floats_tensor([self.batch_size, config.prediction_length] )
__lowerCAmelCase = {
"""past_values""": past_values,
"""static_categorical_features""": static_categorical_features,
"""past_time_features""": past_time_features,
"""past_observed_mask""": past_observed_mask,
"""future_time_features""": future_time_features,
"""future_values""": future_values,
}
return inputs_dict
def A__ ( self ) -> Union[str, Any]:
__lowerCAmelCase = self.get_config()
__lowerCAmelCase = self.prepare_autoformer_inputs_dict(snake_case_ )
return config, inputs_dict
def A__ ( self ) -> int:
__lowerCAmelCase , __lowerCAmelCase = self.prepare_config_and_inputs()
return config, inputs_dict
def A__ ( self , snake_case_ , snake_case_ ) -> int:
__lowerCAmelCase = AutoformerModel(config=snake_case_ ).to(snake_case_ ).eval()
__lowerCAmelCase = model(**snake_case_ )
__lowerCAmelCase = outputs.encoder_last_hidden_state
__lowerCAmelCase = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCAmelCase = model.get_encoder()
encoder.save_pretrained(snake_case_ )
__lowerCAmelCase = AutoformerEncoder.from_pretrained(snake_case_ ).to(snake_case_ )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = model.create_network_inputs(**snake_case_ )
__lowerCAmelCase , __lowerCAmelCase = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
__lowerCAmelCase = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
__lowerCAmelCase = encoder(inputs_embeds=snake_case_ )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
__lowerCAmelCase = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
__lowerCAmelCase = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
__lowerCAmelCase = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
__lowerCAmelCase = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCAmelCase = model.get_decoder()
decoder.save_pretrained(snake_case_ )
__lowerCAmelCase = AutoformerDecoder.from_pretrained(snake_case_ ).to(snake_case_ )
__lowerCAmelCase = decoder(
trend=snake_case_ , inputs_embeds=snake_case_ , encoder_hidden_states=snake_case_ , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class lowerCAmelCase_ ( A__ , A__ , unittest.TestCase ):
'''simple docstring'''
_snake_case = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
_snake_case = (AutoformerForPrediction,) if is_torch_available() else ()
_snake_case = {'''feature-extraction''': AutoformerModel} if is_torch_available() else {}
_snake_case = False
_snake_case = False
_snake_case = False
_snake_case = False
_snake_case = False
_snake_case = False
def A__ ( self ) -> Optional[int]:
__lowerCAmelCase = AutoformerModelTester(self )
__lowerCAmelCase = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ )
def A__ ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
def A__ ( self ) -> Dict:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(snake_case_ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case_ )
__lowerCAmelCase , __lowerCAmelCase = model_class.from_pretrained(snake_case_ , output_loading_info=snake_case_ )
self.assertEqual(info["""missing_keys"""] , [] )
def A__ ( self ) -> Optional[Any]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*snake_case_ )
@unittest.skip(reason="""Model has no tokens embeddings""" )
def A__ ( self ) -> Any:
pass
def A__ ( self ) -> str:
__lowerCAmelCase = inspect.signature(getattr(snake_case_ , """forward""" ) )
# The main input is the name of the argument after `self`
__lowerCAmelCase = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , snake_case_ )
def A__ ( self ) -> Any:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(snake_case_ )
__lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase = [*signature.parameters.keys()]
__lowerCAmelCase = [
"""past_values""",
"""past_time_features""",
"""past_observed_mask""",
"""static_categorical_features""",
"""static_real_features""",
"""future_values""",
"""future_time_features""",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("""future_observed_mask""" )
expected_arg_names.extend(
[
"""decoder_attention_mask""",
"""head_mask""",
"""decoder_head_mask""",
"""cross_attn_head_mask""",
"""encoder_outputs""",
"""past_key_values""",
"""output_hidden_states""",
"""output_attentions""",
"""use_cache""",
"""return_dict""",
] )
self.assertListEqual(arg_names[: len(snake_case_ )] , snake_case_ )
def A__ ( self ) -> Optional[int]:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = True
__lowerCAmelCase = getattr(self.model_tester , """seq_length""" , snake_case_ )
__lowerCAmelCase = getattr(self.model_tester , """decoder_seq_length""" , snake_case_ )
__lowerCAmelCase = getattr(self.model_tester , """encoder_seq_length""" , snake_case_ )
__lowerCAmelCase = getattr(self.model_tester , """d_model""" , snake_case_ )
__lowerCAmelCase = getattr(self.model_tester , """num_attention_heads""" , snake_case_ )
__lowerCAmelCase = d_model // num_attention_heads
for model_class in self.all_model_classes:
__lowerCAmelCase = True
__lowerCAmelCase = False
__lowerCAmelCase = True
__lowerCAmelCase = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
__lowerCAmelCase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__lowerCAmelCase = True
__lowerCAmelCase = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
__lowerCAmelCase = outputs.encoder_attentions
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
__lowerCAmelCase = len(snake_case_ )
__lowerCAmelCase = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(snake_case_ , snake_case_ )
# decoder attentions
__lowerCAmelCase = outputs.decoder_attentions
self.assertIsInstance(snake_case_ , (list, tuple) )
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
__lowerCAmelCase = outputs.cross_attentions
self.assertIsInstance(snake_case_ , (list, tuple) )
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
__lowerCAmelCase = True
__lowerCAmelCase = True
__lowerCAmelCase = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(out_len + 2 , len(snake_case_ ) )
__lowerCAmelCase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def A__ ( self ) -> int:
super().test_retain_grad_hidden_states_attentions()
def lowercase (_lowerCAmelCase="train-batch.pt" ):
__lowerCAmelCase = hf_hub_download(repo_id="""hf-internal-testing/tourism-monthly-batch""" , filename=_lowerCAmelCase , repo_type="""dataset""" )
__lowerCAmelCase = torch.load(_lowerCAmelCase , map_location=_lowerCAmelCase )
return batch
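# The helper above pulls a small pre-tokenized batch of the tourism-monthly dataset from
# the `hf-internal-testing` Hub repo so the slow integration tests below run against fixed
# inputs.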
@require_torch
@slow
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self ) -> int:
__lowerCAmelCase = AutoformerModel.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(snake_case_ )
__lowerCAmelCase = prepare_batch()
with torch.no_grad():
__lowerCAmelCase = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , future_values=batch["""future_values"""] , future_time_features=batch["""future_time_features"""] , )[0]
__lowerCAmelCase = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , snake_case_ )
__lowerCAmelCase = torch.tensor(
[[0.3_593, -1.3_398, 0.6_330], [0.2_279, 1.5_396, -0.1_792], [0.0_450, 1.3_225, -0.2_335]] , device=snake_case_ )
self.assertTrue(torch.allclose(output[0, :3, :3] , snake_case_ , atol=snake_case_ ) )
def A__ ( self ) -> List[str]:
__lowerCAmelCase = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(snake_case_ )
__lowerCAmelCase = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
__lowerCAmelCase = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , ).encoder_last_hidden_state
__lowerCAmelCase = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , snake_case_ )
__lowerCAmelCase = torch.tensor(
[[-0.0_734, -0.9_036, 0.8_358], [4.7_186, 2.4_113, 1.9_581], [1.7_953, 2.3_558, 1.2_970]] , device=snake_case_ )
self.assertTrue(torch.allclose(output[0, :3, :3] , snake_case_ , atol=snake_case_ ) )
def A__ ( self ) -> Any:
__lowerCAmelCase = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(snake_case_ )
__lowerCAmelCase = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
__lowerCAmelCase = model.generate(
static_categorical_features=batch["""static_categorical_features"""] , past_time_features=batch["""past_time_features"""] , past_values=batch["""past_values"""] , future_time_features=batch["""future_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , )
__lowerCAmelCase = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , snake_case_ )
__lowerCAmelCase = torch.tensor([3_130.6_763, 4_056.5_293, 7_053.0_786] , device=snake_case_ )
__lowerCAmelCase = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , snake_case_ , rtol=1e-1 ) )
"""simple docstring"""
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
SCREAMING_SNAKE_CASE_ = TypeVar('''KT''')
SCREAMING_SNAKE_CASE_ = TypeVar('''VT''')
class lowerCAmelCase_ ( Generic[KT, VT] ):
'''simple docstring'''
def __init__( self , snake_case_ = "root" , snake_case_ = None ) -> Tuple:
__lowerCAmelCase = key
__lowerCAmelCase = value
__lowerCAmelCase = []
def __repr__( self ) -> str:
return f"""Node({self.key}: {self.value})"""
@property
def A__ ( self ) -> int:
return len(self.forward )
class lowerCAmelCase_ ( Generic[KT, VT] ):
'''simple docstring'''
def __init__( self , snake_case_ = 0.5 , snake_case_ = 16 ) -> Union[str, Any]:
__lowerCAmelCase = Node[KT, VT]()
__lowerCAmelCase = 0
__lowerCAmelCase = p
__lowerCAmelCase = max_level
def __str__( self ) -> str:
__lowerCAmelCase = list(self )
if len(snake_case_ ) == 0:
return f"""SkipList(level={self.level})"""
__lowerCAmelCase = max((len(str(snake_case_ ) ) for item in items) , default=4 )
__lowerCAmelCase = max(snake_case_ , 4 ) + 4
__lowerCAmelCase = self.head
__lowerCAmelCase = []
__lowerCAmelCase = node.forward.copy()
lines.append(f"""[{node.key}]""".ljust(snake_case_ , """-""" ) + """* """ * len(snake_case_ ) )
lines.append(""" """ * label_size + """| """ * len(snake_case_ ) )
while len(node.forward ) != 0:
__lowerCAmelCase = node.forward[0]
lines.append(
f"""[{node.key}]""".ljust(snake_case_ , """-""" )
+ """ """.join(str(n.key ) if n.key == node.key else """|""" for n in forwards ) )
lines.append(""" """ * label_size + """| """ * len(snake_case_ ) )
__lowerCAmelCase = node.forward
lines.append("""None""".ljust(snake_case_ ) + """* """ * len(snake_case_ ) )
return f"""SkipList(level={self.level})\n""" + "\n".join(snake_case_ )
def __iter__( self ) -> Optional[int]:
__lowerCAmelCase = self.head
while len(node.forward ) != 0:
yield node.forward[0].key
__lowerCAmelCase = node.forward[0]
def A__ ( self ) -> int:
__lowerCAmelCase = 1
while random() < self.p and level < self.max_level:
level += 1
return level
def A__ ( self , snake_case_ ) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
__lowerCAmelCase = []
__lowerCAmelCase = self.head
for i in reversed(range(self.level ) ):
# i < node.level - When node level is lesser than `i` decrement `i`.
# node.forward[i].key < key - Jumping to node with key value higher
# or equal to searched key would result
# in skipping searched key.
while i < node.level and node.forward[i].key < key:
__lowerCAmelCase = node.forward[i]
# Each leftmost node (relative to searched node) will potentially have to
# be updated.
update_vector.append(snake_case_ )
update_vector.reverse() # Note that we were inserting values in reverse order.
# len(node.forward) != 0 - If current node doesn't contain any further
# references then searched key is not present.
# node.forward[0].key == key - Next node key should be equal to search key
# if key is present.
if len(node.forward ) != 0 and node.forward[0].key == key:
return node.forward[0], update_vector
else:
return None, update_vector
def A__ ( self , snake_case_ ) -> List[Any]:
__lowerCAmelCase , __lowerCAmelCase = self._locate_node(snake_case_ )
if node is not None:
for i, update_node in enumerate(snake_case_ ):
# Remove or replace all references to removed node.
if update_node.level > i and update_node.forward[i].key == key:
if node.level > i:
__lowerCAmelCase = node.forward[i]
else:
__lowerCAmelCase = update_node.forward[:i]
def A__ ( self , snake_case_ , snake_case_ ) -> Optional[Any]:
__lowerCAmelCase , __lowerCAmelCase = self._locate_node(snake_case_ )
if node is not None:
__lowerCAmelCase = value
else:
__lowerCAmelCase = self.random_level()
if level > self.level:
# After level increase we have to add additional nodes to head.
for _ in range(self.level - 1 , snake_case_ ):
update_vector.append(self.head )
__lowerCAmelCase = level
__lowerCAmelCase = Node(snake_case_ , snake_case_ )
for i, update_node in enumerate(update_vector[:level] ):
# Change references to pass through new node.
if update_node.level > i:
new_node.forward.append(update_node.forward[i] )
if update_node.level < i + 1:
update_node.forward.append(snake_case_ )
else:
__lowerCAmelCase = new_node
def A__ ( self , snake_case_ ) -> VT | None:
__lowerCAmelCase , __lowerCAmelCase = self._locate_node(snake_case_ )
if node is not None:
return node.value
return None
def lowercase ():
__lowerCAmelCase = SkipList()
skip_list.insert("""Key1""" , 3 )
skip_list.insert("""Key2""" , 12 )
skip_list.insert("""Key3""" , 41 )
skip_list.insert("""Key4""" , -19 )
__lowerCAmelCase = skip_list.head
__lowerCAmelCase = {}
while node.level != 0:
__lowerCAmelCase = node.forward[0]
__lowerCAmelCase = node.value
assert len(_lowerCAmelCase ) == 4
assert all_values["Key1"] == 3
assert all_values["Key2"] == 12
assert all_values["Key3"] == 41
assert all_values["Key4"] == -19
def lowercase ():
__lowerCAmelCase = SkipList()
skip_list.insert("""Key1""" , 10 )
skip_list.insert("""Key1""" , 12 )
skip_list.insert("""Key5""" , 7 )
skip_list.insert("""Key7""" , 10 )
skip_list.insert("""Key10""" , 5 )
skip_list.insert("""Key7""" , 7 )
skip_list.insert("""Key5""" , 5 )
skip_list.insert("""Key10""" , 10 )
__lowerCAmelCase = skip_list.head
__lowerCAmelCase = {}
while node.level != 0:
__lowerCAmelCase = node.forward[0]
__lowerCAmelCase = node.value
if len(_lowerCAmelCase ) != 4:
print()
assert len(_lowerCAmelCase ) == 4
assert all_values["Key1"] == 12
assert all_values["Key7"] == 7
assert all_values["Key5"] == 5
assert all_values["Key10"] == 10
def lowercase ():
__lowerCAmelCase = SkipList()
assert skip_list.find("""Some key""" ) is None
def lowercase ():
__lowerCAmelCase = SkipList()
skip_list.insert("""Key2""" , 20 )
assert skip_list.find("""Key2""" ) == 20
skip_list.insert("""Some Key""" , 10 )
skip_list.insert("""Key2""" , 8 )
skip_list.insert("""V""" , 13 )
assert skip_list.find("""Y""" ) is None
assert skip_list.find("""Key2""" ) == 8
assert skip_list.find("""Some Key""" ) == 10
assert skip_list.find("""V""" ) == 13
def lowercase ():
__lowerCAmelCase = SkipList()
skip_list.delete("""Some key""" )
assert len(skip_list.head.forward ) == 0
def lowercase ():
__lowerCAmelCase = SkipList()
skip_list.insert("""Key1""" , 12 )
skip_list.insert("""V""" , 13 )
skip_list.insert("""X""" , 14 )
skip_list.insert("""Key2""" , 15 )
skip_list.delete("""V""" )
skip_list.delete("""Key2""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""Key2""" ) is None
def lowercase ():
__lowerCAmelCase = SkipList()
skip_list.insert("""Key1""" , 12 )
skip_list.insert("""V""" , 13 )
skip_list.insert("""X""" , 14 )
skip_list.insert("""Key2""" , 15 )
skip_list.delete("""V""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) == 14
assert skip_list.find("""Key1""" ) == 12
assert skip_list.find("""Key2""" ) == 15
skip_list.delete("""X""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) is None
assert skip_list.find("""Key1""" ) == 12
assert skip_list.find("""Key2""" ) == 15
skip_list.delete("""Key1""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) is None
assert skip_list.find("""Key1""" ) is None
assert skip_list.find("""Key2""" ) == 15
skip_list.delete("""Key2""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) is None
assert skip_list.find("""Key1""" ) is None
assert skip_list.find("""Key2""" ) is None
def lowercase ():
__lowerCAmelCase = SkipList()
skip_list.insert("""Key1""" , 12 )
skip_list.insert("""V""" , 13 )
skip_list.insert("""X""" , 142 )
skip_list.insert("""Key2""" , 15 )
skip_list.delete("""X""" )
def traverse_keys(_lowerCAmelCase ):
yield node.key
for forward_node in node.forward:
yield from traverse_keys(_lowerCAmelCase )
assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def lowercase ():
def is_sorted(_lowerCAmelCase ):
return all(next_item >= item for item, next_item in zip(_lowerCAmelCase , lst[1:] ) )
__lowerCAmelCase = SkipList()
for i in range(10 ):
skip_list.insert(_lowerCAmelCase , _lowerCAmelCase )
assert is_sorted(list(_lowerCAmelCase ) )
skip_list.delete(5 )
skip_list.delete(8 )
skip_list.delete(2 )
assert is_sorted(list(_lowerCAmelCase ) )
skip_list.insert(-12 , -12 )
skip_list.insert(77 , 77 )
assert is_sorted(list(_lowerCAmelCase ) )
def lowercase ():
for _ in range(100 ):
# Repeat test 100 times due to the probabilistic nature of skip list
# random values == random bugs
test_insert()
test_insert_overrides_existing_value()
test_searching_empty_list_returns_none()
test_search()
test_deleting_item_from_empty_list_do_nothing()
test_deleted_items_are_not_founded_by_find_method()
test_delete_removes_only_given_key()
test_delete_doesnt_leave_dead_nodes()
test_iter_always_yields_sorted_values()
def lowercase ():
__lowerCAmelCase = SkipList()
skip_list.insert(2 , """2""" )
skip_list.insert(4 , """4""" )
skip_list.insert(6 , """4""" )
skip_list.insert(4 , """5""" )
skip_list.insert(8 , """4""" )
skip_list.insert(9 , """4""" )
skip_list.delete(4 )
print(_lowerCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
"""simple docstring"""
from math import pi, sqrt
def gamma(num: float) -> float:
    """Gamma function for positive integers and half-integers, e.g. gamma(5) == 24.0 and gamma(0.5) == sqrt(pi)."""
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)
def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0
if __name__ == "__main__":
    from doctest import testmod
    testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        print(f"gamma({num}) = {gamma(num)}")
        print("\nEnter 0 to exit...")
"""simple docstring"""
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def lowercase (_lowerCAmelCase , _lowerCAmelCase=() , _lowerCAmelCase=None , _lowerCAmelCase="no" , _lowerCAmelCase="29500" ):
__lowerCAmelCase = False
__lowerCAmelCase = False
if any(key.startswith("""KAGGLE""" ) for key in os.environ.keys() ):
__lowerCAmelCase = True
elif "IPython" in sys.modules:
__lowerCAmelCase = """google.colab""" in str(sys.modules["""IPython"""].get_ipython() )
try:
__lowerCAmelCase = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
f"""Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.""" )
if (in_colab or in_kaggle) and (os.environ.get("""TPU_NAME""" , _lowerCAmelCase ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"""To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside """
"""your training function. Restart your notebook and make sure no cells initializes an """
"""`Accelerator`.""" )
if num_processes is None:
__lowerCAmelCase = 8
__lowerCAmelCase = PrepareForLaunch(_lowerCAmelCase , distributed_type="""TPU""" )
print(f"""Launching a training on {num_processes} TPU cores.""" )
xmp.spawn(_lowerCAmelCase , args=_lowerCAmelCase , nprocs=_lowerCAmelCase , start_method="""fork""" )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print("""Launching training on one GPU.""" )
else:
print("""Launching training on one CPU.""" )
function(*_lowerCAmelCase )
else:
if num_processes is None:
raise ValueError(
"""You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.""" )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"""To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized """
"""inside your training function. Restart your notebook and make sure no cells initializes an """
"""`Accelerator`.""" )
if torch.cuda.is_initialized():
raise ValueError(
"""To launch a multi-GPU training from your notebook, you need to avoid running any instruction """
"""using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA """
"""function.""" )
# torch.distributed will expect a few environment variables to be set. We set the ones common to each
# process here (the other ones will be set by the launcher).
with patch_environment(
world_size=_lowerCAmelCase , master_addr="""127.0.01""" , master_port=_lowerCAmelCase , mixed_precision=_lowerCAmelCase ):
__lowerCAmelCase = PrepareForLaunch(_lowerCAmelCase , distributed_type="""MULTI_GPU""" )
print(f"""Launching training on {num_processes} GPUs.""" )
try:
start_processes(_lowerCAmelCase , args=_lowerCAmelCase , nprocs=_lowerCAmelCase , start_method="""fork""" )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
"""CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. """
"""This likely stems from an outside import causing issues once the `notebook_launcher()` is called. """
"""Please review your imports and test them when running the `notebook_launcher()` to identify """
"""which one is problematic.""" ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
__lowerCAmelCase = """1"""
print("""Launching training on MPS.""" )
elif torch.cuda.is_available():
print("""Launching training on one GPU.""" )
else:
print("""Launching training on CPU.""" )
function(*_lowerCAmelCase )
def lowercase (_lowerCAmelCase , _lowerCAmelCase=() , _lowerCAmelCase=2 ):
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variables to be set here. We set the ones common to each
        # process here (the other ones will be set by the launcher).
with patch_environment(
            world_size=_lowerCAmelCase , master_addr="""127.0.0.1""" , master_port="""29500""" , accelerate_mixed_precision="""no""" , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu="""yes""" , ):
__lowerCAmelCase = PrepareForLaunch(_lowerCAmelCase , debug=_lowerCAmelCase )
start_processes(_lowerCAmelCase , args=_lowerCAmelCase , nprocs=_lowerCAmelCase , start_method="""fork""" )
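# A hedged usage sketch of the notebook launcher defined above (exposed by Accelerate as
# `notebook_launcher`); the toy training function and the two-process count are
# illustrative assumptions, not part of this module.
from accelerate import Accelerator, notebook_launcher


def _toy_training_loop(learning_rate=1e-3):
    # Each spawned worker builds its own Accelerator; real code would prepare a model,
    # optimizer and dataloaders here before the training loop.
    accelerator = Accelerator()
    accelerator.print(f"process {accelerator.process_index} running with lr={learning_rate}")


# Uncomment inside a notebook: spawns one process per GPU, or falls back to a single CPU/GPU/MPS process.
# notebook_launcher(_toy_training_loop, args=(1e-3,), num_processes=2)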
"""simple docstring"""
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
def lowercase ():
# Get the sagemaker specific mp parameters from smp_options variable.
__lowerCAmelCase = os.getenv("""SM_HP_MP_PARAMETERS""" , """{}""" )
try:
# Parse it and check the field "partitions" is included, it is required for model parallel.
__lowerCAmelCase = json.loads(_lowerCAmelCase )
if "partitions" not in smp_options:
return False
except json.JSONDecodeError:
return False
# Get the sagemaker specific framework parameters from mpi_options variable.
__lowerCAmelCase = os.getenv("""SM_FRAMEWORK_PARAMS""" , """{}""" )
try:
# Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
__lowerCAmelCase = json.loads(_lowerCAmelCase )
if not mpi_options.get("""sagemaker_mpi_enabled""" , _lowerCAmelCase ):
return False
except json.JSONDecodeError:
return False
# Lastly, check if the `smdistributed` module is present.
return importlib.util.find_spec("""smdistributed""" ) is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class lowerCAmelCase_ ( A__ ):
'''simple docstring'''
_snake_case = field(
default='''''' , metadata={'''help''': '''Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer'''} , )
def A__ ( self ) -> Tuple:
super().__post_init__()
warnings.warn(
"""`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use """
"""`TrainingArguments` instead.""" , snake_case_ , )
@cached_property
def A__ ( self ) -> "torch.device":
logger.info("""PyTorch: setting up devices""" )
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
"""torch.distributed process group is initialized, but local_rank == -1. """
"""In order to use Torch DDP, launch your script with `python -m torch.distributed.launch""" )
if self.no_cuda:
__lowerCAmelCase = torch.device("""cpu""" )
__lowerCAmelCase = 0
elif is_sagemaker_model_parallel_available():
__lowerCAmelCase = smp.local_rank()
__lowerCAmelCase = torch.device("""cuda""" , snake_case_ )
__lowerCAmelCase = 1
elif is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
torch.distributed.init_process_group(backend="""smddp""" , timeout=self.ddp_timeout_delta )
__lowerCAmelCase = int(os.getenv("""SMDATAPARALLEL_LOCAL_RANK""" ) )
__lowerCAmelCase = torch.device("""cuda""" , self.local_rank )
__lowerCAmelCase = 1
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
__lowerCAmelCase = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
# Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
# the default value.
__lowerCAmelCase = torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend="""nccl""" , timeout=self.ddp_timeout_delta )
__lowerCAmelCase = torch.device("""cuda""" , self.local_rank )
__lowerCAmelCase = 1
if device.type == "cuda":
torch.cuda.set_device(snake_case_ )
return device
@property
def A__ ( self ) -> Dict:
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
def A__ ( self ) -> Optional[int]:
return not is_sagemaker_model_parallel_available()
@property
def A__ ( self ) -> Tuple:
return False
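# A small sketch of the environment the model-parallel check above inspects; the JSON
# payloads are illustrative assumptions, and the check still requires the `smdistributed`
# package to be importable before it returns True.
import json
import os

os.environ["SM_HP_MP_PARAMETERS"] = json.dumps({"partitions": 2, "microbatches": 4})
os.environ["SM_FRAMEWORK_PARAMS"] = json.dumps({"sagemaker_mpi_enabled": True})
# With both variables populated like this, is_sagemaker_model_parallel_available() only
# depends on importlib.util.find_spec("smdistributed") succeeding.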
"""simple docstring"""
import sys
import turtle
def lowercase (_lowerCAmelCase , _lowerCAmelCase ):
return (pa[0] + pa[0]) / 2, (pa[1] + pa[1]) / 2
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ):
my_pen.up()
my_pen.goto(vertexa[0] , vertexa[1] )
my_pen.down()
my_pen.goto(vertexa[0] , vertexa[1] )
my_pen.goto(vertexa[0] , vertexa[1] )
my_pen.goto(vertexa[0] , vertexa[1] )
if depth == 0:
return
triangle(_lowerCAmelCase , get_mid(_lowerCAmelCase , _lowerCAmelCase ) , get_mid(_lowerCAmelCase , _lowerCAmelCase ) , depth - 1 )
triangle(_lowerCAmelCase , get_mid(_lowerCAmelCase , _lowerCAmelCase ) , get_mid(_lowerCAmelCase , _lowerCAmelCase ) , depth - 1 )
triangle(_lowerCAmelCase , get_mid(_lowerCAmelCase , _lowerCAmelCase ) , get_mid(_lowerCAmelCase , _lowerCAmelCase ) , depth - 1 )
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'''Correct format for using this script: '''
'''python fractals.py <int:depth_for_fractal>'''
)
SCREAMING_SNAKE_CASE_ = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor('''red''')
SCREAMING_SNAKE_CASE_ = [(-175, -125), (0, 175), (175, -125)] # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE_ = {
'''configuration_roberta''': ['''ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RobertaConfig''', '''RobertaOnnxConfig'''],
'''tokenization_roberta''': ['''RobertaTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = ['''RobertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
'''ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RobertaForCausalLM''',
'''RobertaForMaskedLM''',
'''RobertaForMultipleChoice''',
'''RobertaForQuestionAnswering''',
'''RobertaForSequenceClassification''',
'''RobertaForTokenClassification''',
'''RobertaModel''',
'''RobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
'''TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRobertaForCausalLM''',
'''TFRobertaForMaskedLM''',
'''TFRobertaForMultipleChoice''',
'''TFRobertaForQuestionAnswering''',
'''TFRobertaForSequenceClassification''',
'''TFRobertaForTokenClassification''',
'''TFRobertaMainLayer''',
'''TFRobertaModel''',
'''TFRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
'''FlaxRobertaForCausalLM''',
'''FlaxRobertaForMaskedLM''',
'''FlaxRobertaForMultipleChoice''',
'''FlaxRobertaForQuestionAnswering''',
'''FlaxRobertaForSequenceClassification''',
'''FlaxRobertaForTokenClassification''',
'''FlaxRobertaModel''',
'''FlaxRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
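# A brief usage sketch of the lazy module configured above: attribute access on the package
# is what triggers the real import, so the heavy torch/TF/Flax modules only load when their
# classes are requested. The "roberta-base" checkpoint id is the standard Hub name.
from transformers import RobertaConfig, RobertaTokenizer

config = RobertaConfig()  # default RoBERTa hyper-parameters, no weights involved
tokenizer = RobertaTokenizer.from_pretrained("roberta-base")  # fetches vocab/merges from the Hub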
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
('''audio-spectrogram-transformer''', '''ASTFeatureExtractor'''),
('''beit''', '''BeitFeatureExtractor'''),
('''chinese_clip''', '''ChineseCLIPFeatureExtractor'''),
('''clap''', '''ClapFeatureExtractor'''),
('''clip''', '''CLIPFeatureExtractor'''),
('''clipseg''', '''ViTFeatureExtractor'''),
('''conditional_detr''', '''ConditionalDetrFeatureExtractor'''),
('''convnext''', '''ConvNextFeatureExtractor'''),
('''cvt''', '''ConvNextFeatureExtractor'''),
('''data2vec-audio''', '''Wav2Vec2FeatureExtractor'''),
('''data2vec-vision''', '''BeitFeatureExtractor'''),
('''deformable_detr''', '''DeformableDetrFeatureExtractor'''),
('''deit''', '''DeiTFeatureExtractor'''),
('''detr''', '''DetrFeatureExtractor'''),
('''dinat''', '''ViTFeatureExtractor'''),
('''donut-swin''', '''DonutFeatureExtractor'''),
('''dpt''', '''DPTFeatureExtractor'''),
('''encodec''', '''EncodecFeatureExtractor'''),
('''flava''', '''FlavaFeatureExtractor'''),
('''glpn''', '''GLPNFeatureExtractor'''),
('''groupvit''', '''CLIPFeatureExtractor'''),
('''hubert''', '''Wav2Vec2FeatureExtractor'''),
('''imagegpt''', '''ImageGPTFeatureExtractor'''),
('''layoutlmv2''', '''LayoutLMv2FeatureExtractor'''),
('''layoutlmv3''', '''LayoutLMv3FeatureExtractor'''),
('''levit''', '''LevitFeatureExtractor'''),
('''maskformer''', '''MaskFormerFeatureExtractor'''),
('''mctct''', '''MCTCTFeatureExtractor'''),
('''mobilenet_v1''', '''MobileNetV1FeatureExtractor'''),
('''mobilenet_v2''', '''MobileNetV2FeatureExtractor'''),
('''mobilevit''', '''MobileViTFeatureExtractor'''),
('''nat''', '''ViTFeatureExtractor'''),
('''owlvit''', '''OwlViTFeatureExtractor'''),
('''perceiver''', '''PerceiverFeatureExtractor'''),
('''poolformer''', '''PoolFormerFeatureExtractor'''),
('''regnet''', '''ConvNextFeatureExtractor'''),
('''resnet''', '''ConvNextFeatureExtractor'''),
('''segformer''', '''SegformerFeatureExtractor'''),
('''sew''', '''Wav2Vec2FeatureExtractor'''),
('''sew-d''', '''Wav2Vec2FeatureExtractor'''),
('''speech_to_text''', '''Speech2TextFeatureExtractor'''),
('''speecht5''', '''SpeechT5FeatureExtractor'''),
('''swiftformer''', '''ViTFeatureExtractor'''),
('''swin''', '''ViTFeatureExtractor'''),
('''swinv2''', '''ViTFeatureExtractor'''),
('''table-transformer''', '''DetrFeatureExtractor'''),
('''timesformer''', '''VideoMAEFeatureExtractor'''),
('''tvlt''', '''TvltFeatureExtractor'''),
('''unispeech''', '''Wav2Vec2FeatureExtractor'''),
('''unispeech-sat''', '''Wav2Vec2FeatureExtractor'''),
('''van''', '''ConvNextFeatureExtractor'''),
('''videomae''', '''VideoMAEFeatureExtractor'''),
('''vilt''', '''ViltFeatureExtractor'''),
('''vit''', '''ViTFeatureExtractor'''),
('''vit_mae''', '''ViTFeatureExtractor'''),
('''vit_msn''', '''ViTFeatureExtractor'''),
('''wav2vec2''', '''Wav2Vec2FeatureExtractor'''),
('''wav2vec2-conformer''', '''Wav2Vec2FeatureExtractor'''),
('''wavlm''', '''Wav2Vec2FeatureExtractor'''),
('''whisper''', '''WhisperFeatureExtractor'''),
('''xclip''', '''CLIPFeatureExtractor'''),
('''yolos''', '''YolosFeatureExtractor'''),
]
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def lowercase (_lowerCAmelCase ):
for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
if class_name in extractors:
__lowerCAmelCase = model_type_to_module_name(_lowerCAmelCase )
__lowerCAmelCase = importlib.import_module(f""".{module_name}""" , """transformers.models""" )
try:
return getattr(_lowerCAmelCase , _lowerCAmelCase )
except AttributeError:
continue
for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
if getattr(_lowerCAmelCase , """__name__""" , _lowerCAmelCase ) == class_name:
return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
__lowerCAmelCase = importlib.import_module("""transformers""" )
if hasattr(_lowerCAmelCase , _lowerCAmelCase ):
return getattr(_lowerCAmelCase , _lowerCAmelCase )
return None
def lowercase (_lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = False , _lowerCAmelCase = False , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = False , **_lowerCAmelCase , ):
__lowerCAmelCase = get_file_from_repo(
_lowerCAmelCase , _lowerCAmelCase , cache_dir=_lowerCAmelCase , force_download=_lowerCAmelCase , resume_download=_lowerCAmelCase , proxies=_lowerCAmelCase , use_auth_token=_lowerCAmelCase , revision=_lowerCAmelCase , local_files_only=_lowerCAmelCase , )
if resolved_config_file is None:
logger.info(
"""Could not locate the feature extractor configuration file, will try to use the model config instead.""" )
return {}
with open(_lowerCAmelCase , encoding="""utf-8""" ) as reader:
return json.load(_lowerCAmelCase )
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self ) -> str:
raise EnvironmentError(
"""AutoFeatureExtractor is designed to be instantiated """
"""using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.""" )
@classmethod
@replace_list_option_in_docstrings(snake_case_ )
def A__ ( cls , snake_case_ , **snake_case_ ) -> Tuple:
__lowerCAmelCase = kwargs.pop("""config""" , snake_case_ )
__lowerCAmelCase = kwargs.pop("""trust_remote_code""" , snake_case_ )
__lowerCAmelCase = True
__lowerCAmelCase , __lowerCAmelCase = FeatureExtractionMixin.get_feature_extractor_dict(snake_case_ , **snake_case_ )
__lowerCAmelCase = config_dict.get("""feature_extractor_type""" , snake_case_ )
__lowerCAmelCase = None
if "AutoFeatureExtractor" in config_dict.get("""auto_map""" , {} ):
__lowerCAmelCase = config_dict["""auto_map"""]["""AutoFeatureExtractor"""]
# If we don't find the feature extractor class in the feature extractor config, let's try the model config.
if feature_extractor_class is None and feature_extractor_auto_map is None:
if not isinstance(snake_case_ , snake_case_ ):
__lowerCAmelCase = AutoConfig.from_pretrained(snake_case_ , **snake_case_ )
# It could be in `config.feature_extractor_type``
__lowerCAmelCase = getattr(snake_case_ , """feature_extractor_type""" , snake_case_ )
if hasattr(snake_case_ , """auto_map""" ) and "AutoFeatureExtractor" in config.auto_map:
__lowerCAmelCase = config.auto_map["""AutoFeatureExtractor"""]
if feature_extractor_class is not None:
__lowerCAmelCase = feature_extractor_class_from_name(snake_case_ )
__lowerCAmelCase = feature_extractor_auto_map is not None
__lowerCAmelCase = feature_extractor_class is not None or type(snake_case_ ) in FEATURE_EXTRACTOR_MAPPING
__lowerCAmelCase = resolve_trust_remote_code(
snake_case_ , snake_case_ , snake_case_ , snake_case_ )
if has_remote_code and trust_remote_code:
__lowerCAmelCase = get_class_from_dynamic_module(
snake_case_ , snake_case_ , **snake_case_ )
__lowerCAmelCase = kwargs.pop("""code_revision""" , snake_case_ )
if os.path.isdir(snake_case_ ):
feature_extractor_class.register_for_auto_class()
return feature_extractor_class.from_dict(snake_case_ , **snake_case_ )
elif feature_extractor_class is not None:
return feature_extractor_class.from_dict(snake_case_ , **snake_case_ )
# Last try: we use the FEATURE_EXTRACTOR_MAPPING.
elif type(snake_case_ ) in FEATURE_EXTRACTOR_MAPPING:
__lowerCAmelCase = FEATURE_EXTRACTOR_MAPPING[type(snake_case_ )]
return feature_extractor_class.from_dict(snake_case_ , **snake_case_ )
raise ValueError(
f"""Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a """
f"""`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following """
f"""`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}""" )
@staticmethod
def A__ ( snake_case_ , snake_case_ ) -> str:
FEATURE_EXTRACTOR_MAPPING.register(snake_case_ , snake_case_ )
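# A hedged usage sketch of the auto class defined above (AutoFeatureExtractor in
# Transformers); the Wav2Vec2 checkpoint id is just a common example and requires Hub access.
from transformers import AutoFeatureExtractor

feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
# The concrete class is resolved through FEATURE_EXTRACTOR_MAPPING, here a Wav2Vec2FeatureExtractor.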
"""simple docstring"""
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
SCREAMING_SNAKE_CASE_ = pytest.mark.integration
@pytest.mark.parametrize("""path""" , ["""paws""", """csv"""] )
def lowercase (_lowerCAmelCase , _lowerCAmelCase ):
inspect_dataset(_lowerCAmelCase , _lowerCAmelCase )
__lowerCAmelCase = path + """.py"""
assert script_name in os.listdir(_lowerCAmelCase )
assert "__pycache__" not in os.listdir(_lowerCAmelCase )
@pytest.mark.filterwarnings("""ignore:inspect_metric is deprecated:FutureWarning""" )
@pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" )
@pytest.mark.parametrize("""path""" , ["""accuracy"""] )
def lowercase (_lowerCAmelCase , _lowerCAmelCase ):
inspect_metric(_lowerCAmelCase , _lowerCAmelCase )
__lowerCAmelCase = path + """.py"""
assert script_name in os.listdir(_lowerCAmelCase )
assert "__pycache__" not in os.listdir(_lowerCAmelCase )
@pytest.mark.parametrize(
"""path, config_name, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = get_dataset_config_info(_lowerCAmelCase , config_name=_lowerCAmelCase )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
with pytest.raises(_lowerCAmelCase ):
get_dataset_config_info(_lowerCAmelCase , config_name=_lowerCAmelCase )
@pytest.mark.parametrize(
"""path, expected""" , [
("""squad""", """plain_text"""),
("""acronym_identification""", """default"""),
("""lhoestq/squad""", """plain_text"""),
("""lhoestq/test""", """default"""),
("""lhoestq/demo1""", """lhoestq--demo1"""),
("""dalle-mini/wit""", """dalle-mini--wit"""),
] , )
def lowercase (_lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = get_dataset_config_names(_lowerCAmelCase )
assert expected in config_names
@pytest.mark.parametrize(
"""path, expected_configs, expected_splits_in_first_config""" , [
("""squad""", ["""plain_text"""], ["""train""", """validation"""]),
("""dalle-mini/wit""", ["""dalle-mini--wit"""], ["""train"""]),
("""paws""", ["""labeled_final""", """labeled_swap""", """unlabeled_final"""], ["""train""", """test""", """validation"""]),
] , )
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = get_dataset_infos(_lowerCAmelCase )
assert list(infos.keys() ) == expected_configs
__lowerCAmelCase = expected_configs[0]
assert expected_config in infos
__lowerCAmelCase = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
"""path, expected_config, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = get_dataset_infos(_lowerCAmelCase )
assert expected_config in infos
__lowerCAmelCase = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
with pytest.raises(_lowerCAmelCase ):
get_dataset_split_names(_lowerCAmelCase , config_name=_lowerCAmelCase )
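# A short sketch of the inspection helpers exercised by the tests above; the "squad"
# dataset id mirrors the parametrized cases and needs network access to the Hub.
from datasets import get_dataset_config_names, get_dataset_split_names

configs = get_dataset_config_names("squad")  # expected to include "plain_text"
splits = get_dataset_split_names("squad", config_name="plain_text")  # ["train", "validation"]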
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'''
),
}
class lowerCAmelCase_ ( A__ ):
'''simple docstring'''
_snake_case = '''dpr'''
def __init__( self , snake_case_=30_522 , snake_case_=768 , snake_case_=12 , snake_case_=12 , snake_case_=3_072 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=512 , snake_case_=2 , snake_case_=0.02 , snake_case_=1e-1_2 , snake_case_=0 , snake_case_="absolute" , snake_case_ = 0 , **snake_case_ , ) -> Tuple:
super().__init__(pad_token_id=snake_case_ , **snake_case_ )
__lowerCAmelCase = vocab_size
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = hidden_act
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = type_vocab_size
__lowerCAmelCase = initializer_range
__lowerCAmelCase = layer_norm_eps
__lowerCAmelCase = projection_dim
__lowerCAmelCase = position_embedding_type
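# A minimal sketch instantiating the configuration defined above (DPRConfig in Transformers);
# overriding projection_dim is an illustrative choice, everything else keeps its default.
from transformers import DPRConfig

config = DPRConfig(projection_dim=128)
assert config.hidden_size == 768 and config.projection_dim == 128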
"""simple docstring"""
def lowercase (_lowerCAmelCase = 100_0000 ):
__lowerCAmelCase = 1
__lowerCAmelCase = 1
__lowerCAmelCase = {1: 1}
for inputa in range(2 , _lowerCAmelCase ):
__lowerCAmelCase = 0
__lowerCAmelCase = inputa
while True:
if number in counters:
counter += counters[number]
break
if number % 2 == 0:
number //= 2
counter += 1
else:
__lowerCAmelCase = (3 * number) + 1
counter += 1
if inputa not in counters:
__lowerCAmelCase = counter
if counter > pre_counter:
__lowerCAmelCase = inputa
__lowerCAmelCase = counter
return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
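# A tiny worked check of the chain counting above: starting from 13 the Collatz chain is
# 13 -> 40 -> 20 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1, ten terms in total. The helper below
# is only for illustration and recomputes the length without memoisation.
def _chain_length(start: int) -> int:
    count = 1
    while start != 1:
        start = start // 2 if start % 2 == 0 else 3 * start + 1
        count += 1
    return count


assert _chain_length(13) == 10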
"""simple docstring"""
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase_ ( A__ ):
'''simple docstring'''
_snake_case = (DDPMParallelScheduler,)
def A__ ( self , **snake_case_ ) -> Optional[Any]:
__lowerCAmelCase = {
"""num_train_timesteps""": 1_000,
"""beta_start""": 0.0_001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""variance_type""": """fixed_small""",
"""clip_sample""": True,
}
config.update(**snake_case_ )
return config
def A__ ( self ) -> List[str]:
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=snake_case_ )
def A__ ( self ) -> str:
for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=snake_case_ , beta_end=snake_case_ )
def A__ ( self ) -> Dict:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=snake_case_ )
def A__ ( self ) -> List[str]:
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=snake_case_ )
def A__ ( self ) -> List[Any]:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=snake_case_ )
def A__ ( self ) -> Union[str, Any]:
self.check_over_configs(thresholding=snake_case_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=snake_case_ , prediction_type=snake_case_ , sample_max_value=snake_case_ , )
def A__ ( self ) -> int:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=snake_case_ )
def A__ ( self ) -> str:
for t in [0, 500, 999]:
self.check_over_forward(time_step=snake_case_ )
def A__ ( self ) -> Any:
__lowerCAmelCase = self.scheduler_classes[0]
__lowerCAmelCase = self.get_scheduler_config()
__lowerCAmelCase = scheduler_class(**snake_case_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00_979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1e-5
def A__ ( self ) -> str:
__lowerCAmelCase = self.scheduler_classes[0]
__lowerCAmelCase = self.get_scheduler_config()
__lowerCAmelCase = scheduler_class(**snake_case_ )
__lowerCAmelCase = len(snake_case_ )
__lowerCAmelCase = self.dummy_model()
__lowerCAmelCase = self.dummy_sample_deter
__lowerCAmelCase = self.dummy_sample_deter + 0.1
__lowerCAmelCase = self.dummy_sample_deter - 0.1
__lowerCAmelCase = samplea.shape[0]
__lowerCAmelCase = torch.stack([samplea, samplea, samplea] , dim=0 )
__lowerCAmelCase = torch.arange(snake_case_ )[0:3, None].repeat(1 , snake_case_ )
__lowerCAmelCase = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
__lowerCAmelCase = scheduler.batch_step_no_noise(snake_case_ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
__lowerCAmelCase = torch.sum(torch.abs(snake_case_ ) )
__lowerCAmelCase = torch.mean(torch.abs(snake_case_ ) )
assert abs(result_sum.item() - 1_153.1_833 ) < 1e-2
assert abs(result_mean.item() - 0.5_005 ) < 1e-3
def A__ ( self ) -> Optional[int]:
__lowerCAmelCase = self.scheduler_classes[0]
__lowerCAmelCase = self.get_scheduler_config()
__lowerCAmelCase = scheduler_class(**snake_case_ )
__lowerCAmelCase = len(snake_case_ )
__lowerCAmelCase = self.dummy_model()
__lowerCAmelCase = self.dummy_sample_deter
__lowerCAmelCase = torch.manual_seed(0 )
for t in reversed(range(snake_case_ ) ):
# 1. predict noise residual
__lowerCAmelCase = model(snake_case_ , snake_case_ )
# 2. predict previous mean of sample x_t-1
__lowerCAmelCase = scheduler.step(snake_case_ , snake_case_ , snake_case_ , generator=snake_case_ ).prev_sample
__lowerCAmelCase = pred_prev_sample
__lowerCAmelCase = torch.sum(torch.abs(snake_case_ ) )
__lowerCAmelCase = torch.mean(torch.abs(snake_case_ ) )
assert abs(result_sum.item() - 258.9_606 ) < 1e-2
assert abs(result_mean.item() - 0.3_372 ) < 1e-3
def A__ ( self ) -> Optional[Any]:
__lowerCAmelCase = self.scheduler_classes[0]
__lowerCAmelCase = self.get_scheduler_config(prediction_type="""v_prediction""" )
__lowerCAmelCase = scheduler_class(**snake_case_ )
__lowerCAmelCase = len(snake_case_ )
__lowerCAmelCase = self.dummy_model()
__lowerCAmelCase = self.dummy_sample_deter
__lowerCAmelCase = torch.manual_seed(0 )
for t in reversed(range(snake_case_ ) ):
# 1. predict noise residual
__lowerCAmelCase = model(snake_case_ , snake_case_ )
# 2. predict previous mean of sample x_t-1
__lowerCAmelCase = scheduler.step(snake_case_ , snake_case_ , snake_case_ , generator=snake_case_ ).prev_sample
__lowerCAmelCase = pred_prev_sample
__lowerCAmelCase = torch.sum(torch.abs(snake_case_ ) )
__lowerCAmelCase = torch.mean(torch.abs(snake_case_ ) )
assert abs(result_sum.item() - 202.0_296 ) < 1e-2
assert abs(result_mean.item() - 0.2_631 ) < 1e-3
def A__ ( self ) -> Tuple:
__lowerCAmelCase = self.scheduler_classes[0]
__lowerCAmelCase = self.get_scheduler_config()
__lowerCAmelCase = scheduler_class(**snake_case_ )
__lowerCAmelCase = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=snake_case_ )
__lowerCAmelCase = scheduler.timesteps
for i, timestep in enumerate(snake_case_ ):
if i == len(snake_case_ ) - 1:
__lowerCAmelCase = -1
else:
__lowerCAmelCase = timesteps[i + 1]
__lowerCAmelCase = scheduler.previous_timestep(snake_case_ )
__lowerCAmelCase = prev_t.item()
self.assertEqual(snake_case_ , snake_case_ )
def A__ ( self ) -> List[str]:
__lowerCAmelCase = self.scheduler_classes[0]
__lowerCAmelCase = self.get_scheduler_config()
__lowerCAmelCase = scheduler_class(**snake_case_ )
__lowerCAmelCase = [100, 87, 50, 51, 0]
with self.assertRaises(snake_case_ , msg="""`custom_timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=snake_case_ )
def A__ ( self ) -> Optional[int]:
__lowerCAmelCase = self.scheduler_classes[0]
__lowerCAmelCase = self.get_scheduler_config()
__lowerCAmelCase = scheduler_class(**snake_case_ )
__lowerCAmelCase = [100, 87, 50, 1, 0]
__lowerCAmelCase = len(snake_case_ )
with self.assertRaises(snake_case_ , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=snake_case_ , timesteps=snake_case_ )
def A__ ( self ) -> str:
__lowerCAmelCase = self.scheduler_classes[0]
__lowerCAmelCase = self.get_scheduler_config()
__lowerCAmelCase = scheduler_class(**snake_case_ )
__lowerCAmelCase = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            snake_case_ , msg=f"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" , ):
scheduler.set_timesteps(timesteps=snake_case_ )
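# A hedged sketch of driving the scheduler that the tests above exercise; the class name and
# the `timesteps=` keyword of set_timesteps come straight from those tests, while the
# five-step custom schedule and the tensor shapes are arbitrary illustrative choices.
import torch
from diffusers import DDPMParallelScheduler

scheduler = DDPMParallelScheduler(num_train_timesteps=1_000)
scheduler.set_timesteps(timesteps=[999, 500, 250, 100, 0])  # custom descending schedule
noise_pred = torch.zeros(1, 3, 8, 8)  # stand-in for a model's predicted noise
sample = torch.randn(1, 3, 8, 8, generator=torch.manual_seed(0))
sample = scheduler.step(noise_pred, 999, sample, generator=torch.manual_seed(0)).prev_sample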
"""simple docstring"""
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
SCREAMING_SNAKE_CASE_ = getLogger(__name__)
SCREAMING_SNAKE_CASE_ = '''cuda''' if torch.cuda.is_available() else '''cpu'''
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 8 , _lowerCAmelCase = DEFAULT_DEVICE , _lowerCAmelCase=False , _lowerCAmelCase="summarization" , _lowerCAmelCase=None , **_lowerCAmelCase , ):
__lowerCAmelCase = Path(_lowerCAmelCase ).open("""w""" , encoding="""utf-8""" )
__lowerCAmelCase = str(_lowerCAmelCase )
__lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(_lowerCAmelCase ).to(_lowerCAmelCase )
if fpaa:
__lowerCAmelCase = model.half()
__lowerCAmelCase = AutoTokenizer.from_pretrained(_lowerCAmelCase )
logger.info(f"""Inferred tokenizer type: {tokenizer.__class__}""" ) # if this is wrong, check config.model_type.
__lowerCAmelCase = time.time()
# update config with task specific params
use_task_specific_params(_lowerCAmelCase , _lowerCAmelCase )
if prefix is None:
__lowerCAmelCase = prefix or getattr(model.config , """prefix""" , """""" ) or """"""
for examples_chunk in tqdm(list(chunks(_lowerCAmelCase , _lowerCAmelCase ) ) ):
__lowerCAmelCase = [prefix + text for text in examples_chunk]
__lowerCAmelCase = tokenizer(_lowerCAmelCase , return_tensors="""pt""" , truncation=_lowerCAmelCase , padding="""longest""" ).to(_lowerCAmelCase )
__lowerCAmelCase = model.generate(
input_ids=batch.input_ids , attention_mask=batch.attention_mask , **_lowerCAmelCase , )
__lowerCAmelCase = tokenizer.batch_decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase , clean_up_tokenization_spaces=_lowerCAmelCase )
for hypothesis in dec:
fout.write(hypothesis + """\n""" )
fout.flush()
fout.close()
__lowerCAmelCase = int(time.time() - start_time ) # seconds
__lowerCAmelCase = len(_lowerCAmelCase )
return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )}
def lowercase ():
return datetime.datetime.now().strftime("""%Y-%m-%d %H:%M:%S""" )
def lowercase (_lowerCAmelCase=True ):
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("""model_name""" , type=_lowerCAmelCase , help="""like facebook/bart-large-cnn,t5-base, etc.""" )
parser.add_argument("""input_path""" , type=_lowerCAmelCase , help="""like cnn_dm/test.source""" )
parser.add_argument("""save_path""" , type=_lowerCAmelCase , help="""where to save summaries""" )
parser.add_argument("""--reference_path""" , type=_lowerCAmelCase , required=_lowerCAmelCase , help="""like cnn_dm/test.target""" )
parser.add_argument("""--score_path""" , type=_lowerCAmelCase , required=_lowerCAmelCase , default="""metrics.json""" , help="""where to save metrics""" )
parser.add_argument("""--device""" , type=_lowerCAmelCase , required=_lowerCAmelCase , default=_lowerCAmelCase , help="""cuda, cuda:1, cpu etc.""" )
parser.add_argument(
"""--prefix""" , type=_lowerCAmelCase , required=_lowerCAmelCase , default=_lowerCAmelCase , help="""will be added to the begininng of src examples""" )
parser.add_argument("""--task""" , type=_lowerCAmelCase , default="""summarization""" , help="""used for task_specific_params + metrics""" )
parser.add_argument("""--bs""" , type=_lowerCAmelCase , default=8 , required=_lowerCAmelCase , help="""batch size""" )
parser.add_argument(
"""--n_obs""" , type=_lowerCAmelCase , default=-1 , required=_lowerCAmelCase , help="""How many observations. Defaults to all.""" )
parser.add_argument("""--fp16""" , action="""store_true""" )
parser.add_argument("""--dump-args""" , action="""store_true""" , help="""print the custom hparams with the results""" )
parser.add_argument(
"""--info""" , nargs="""?""" , type=_lowerCAmelCase , const=datetime_now() , help=(
"""use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."""
""" lang=en-ru. If no value is passed, the current datetime string will be used."""
) , )
# Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
__lowerCAmelCase , __lowerCAmelCase = parser.parse_known_args()
__lowerCAmelCase = parse_numeric_n_bool_cl_kwargs(_lowerCAmelCase )
if parsed_args and verbose:
print(f"""parsed the following generate kwargs: {parsed_args}""" )
__lowerCAmelCase = [""" """ + x.rstrip() if """t5""" in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
if args.n_obs > 0:
__lowerCAmelCase = examples[: args.n_obs]
Path(args.save_path ).parent.mkdir(exist_ok=_lowerCAmelCase )
if args.reference_path is None and Path(args.score_path ).exists():
warnings.warn(f"""score_path {args.score_path} will be overwritten unless you type ctrl-c.""" )
if args.device == "cpu" and args.fpaa:
# this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
raise ValueError("""Can't mix --fp16 and --device cpu""" )
__lowerCAmelCase = generate_summaries_or_translations(
_lowerCAmelCase , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fpaa , task=args.task , prefix=args.prefix , **_lowerCAmelCase , )
if args.reference_path is None:
return {}
# Compute scores
__lowerCAmelCase = calculate_bleu if """translation""" in args.task else calculate_rouge
__lowerCAmelCase = [x.rstrip() for x in open(args.save_path ).readlines()]
__lowerCAmelCase = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(_lowerCAmelCase )]
__lowerCAmelCase = score_fn(_lowerCAmelCase , _lowerCAmelCase )
scores.update(_lowerCAmelCase )
if args.dump_args:
scores.update(_lowerCAmelCase )
if args.info:
__lowerCAmelCase = args.info
if verbose:
print(_lowerCAmelCase )
if args.score_path is not None:
json.dump(_lowerCAmelCase , open(args.score_path , """w""" ) )
return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
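    # Usage sketch for summarization (flags taken from the argparse definition above; the
    # dataset and output paths are placeholders):
    # python run_eval.py facebook/bart-large-cnn cnn_dm/test.source dbart/test_generations.txt \
    #     --reference_path cnn_dm/test.target --score_path dbart/rouge.json --task summarization --bs 8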
run_generate(verbose=True)
"""simple docstring"""
def lowercase (_lowerCAmelCase ):
__lowerCAmelCase = [[0 for _ in range(_lowerCAmelCase )] for _ in range(m + 1 )]
for i in range(m + 1 ):
__lowerCAmelCase = 1
for n in range(m + 1 ):
for k in range(1 , _lowerCAmelCase ):
memo[n][k] += memo[n][k - 1]
if n - k > 0:
memo[n][k] += memo[n - k - 1][k]
return memo[m][m - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
SCREAMING_SNAKE_CASE_ = int(input('''Enter a number: ''').strip())
print(partition(n))
except ValueError:
print('''Please enter a number.''')
else:
try:
SCREAMING_SNAKE_CASE_ = int(sys.argv[1])
print(partition(n))
except ValueError:
print('''Please pass a number.''')
"""simple docstring"""
def lowercase (_lowerCAmelCase = 50 ):
__lowerCAmelCase = [1] * (length + 1)
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
if __name__ == "__main__":
print(F"{solution() = }")
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
SCREAMING_SNAKE_CASE_ = '''bart'''
SCREAMING_SNAKE_CASE_ = True
@st.cache(allow_output_mutation=_lowerCAmelCase )
def lowercase ():
if LOAD_DENSE_INDEX:
__lowerCAmelCase = AutoTokenizer.from_pretrained("""yjernite/retribert-base-uncased""" )
__lowerCAmelCase = AutoModel.from_pretrained("""yjernite/retribert-base-uncased""" ).to("""cuda:0""" )
__lowerCAmelCase = qar_model.eval()
else:
__lowerCAmelCase , __lowerCAmelCase = (None, None)
if MODEL_TYPE == "bart":
__lowerCAmelCase = AutoTokenizer.from_pretrained("""yjernite/bart_eli5""" )
__lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained("""yjernite/bart_eli5""" ).to("""cuda:0""" )
__lowerCAmelCase = torch.load("""seq2seq_models/eli5_bart_model_blm_2.pth""" )
sas_model.load_state_dict(save_dict["""model"""] )
__lowerCAmelCase = sas_model.eval()
else:
__lowerCAmelCase , __lowerCAmelCase = make_qa_sas_model(
model_name="""t5-small""" , from_file="""seq2seq_models/eli5_t5_model_1024_4.pth""" , device="""cuda:0""" )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=_lowerCAmelCase )
def lowercase ():
if LOAD_DENSE_INDEX:
__lowerCAmelCase = faiss.StandardGpuResources()
__lowerCAmelCase = datasets.load_dataset(path="""wiki_snippets""" , name="""wiki40b_en_100_0""" )["""train"""]
__lowerCAmelCase = np.memmap(
"""wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat""" , dtype="""float32""" , mode="""r""" , shape=(wikiaab_passages.num_rows, 128) , )
__lowerCAmelCase = faiss.IndexFlatIP(128 )
__lowerCAmelCase = faiss.index_cpu_to_gpu(_lowerCAmelCase , 1 , _lowerCAmelCase )
wikiaab_gpu_index_flat.add(_lowerCAmelCase ) # TODO fix for larger GPU
else:
__lowerCAmelCase , __lowerCAmelCase = (None, None)
__lowerCAmelCase = Elasticsearch([{"""host""": """localhost""", """port""": """9200"""}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=_lowerCAmelCase )
def lowercase ():
__lowerCAmelCase = datasets.load_dataset("""eli5""" , name="""LFQA_reddit""" )
__lowerCAmelCase = elia["""train_eli5"""]
__lowerCAmelCase = np.memmap(
"""eli5_questions_reps.dat""" , dtype="""float32""" , mode="""r""" , shape=(elia_train.num_rows, 128) )
__lowerCAmelCase = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(_lowerCAmelCase )
return (elia_train, eli5_train_q_index)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = load_indexes()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = load_models()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = load_train_data()
def lowercase (_lowerCAmelCase , _lowerCAmelCase=10 ):
__lowerCAmelCase = embed_questions_for_retrieval([question] , _lowerCAmelCase , _lowerCAmelCase )
__lowerCAmelCase , __lowerCAmelCase = eli5_train_q_index.search(_lowerCAmelCase , _lowerCAmelCase )
__lowerCAmelCase = [elia_train[int(_lowerCAmelCase )] for i in I[0]]
return nn_examples
def lowercase (_lowerCAmelCase , _lowerCAmelCase="wiki40b" , _lowerCAmelCase="dense" , _lowerCAmelCase=10 ):
if source == "none":
__lowerCAmelCase , __lowerCAmelCase = (""" <P> """.join(["""""" for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
__lowerCAmelCase , __lowerCAmelCase = query_qa_dense_index(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
else:
__lowerCAmelCase , __lowerCAmelCase = query_es_index(
_lowerCAmelCase , _lowerCAmelCase , index_name="""english_wiki40b_snippets_100w""" , n_results=_lowerCAmelCase , )
__lowerCAmelCase = [
(res["""article_title"""], res["""section_title"""].strip(), res["""score"""], res["""passage_text"""]) for res in hit_lst
]
__lowerCAmelCase = """question: {} context: {}""".format(_lowerCAmelCase , _lowerCAmelCase )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda _lowerCAmelCase : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _lowerCAmelCase : None),
} )
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=64 , _lowerCAmelCase=256 , _lowerCAmelCase=False , _lowerCAmelCase=2 , _lowerCAmelCase=0.95 , _lowerCAmelCase=0.8 ):
with torch.no_grad():
__lowerCAmelCase = qa_sas_generate(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , num_answers=1 , num_beams=_lowerCAmelCase , min_len=_lowerCAmelCase , max_len=_lowerCAmelCase , do_sample=_lowerCAmelCase , temp=_lowerCAmelCase , top_p=_lowerCAmelCase , top_k=_lowerCAmelCase , max_input_length=1024 , device="""cuda:0""" , )[0]
return (answer, support_list)
st.title('''Long Form Question Answering with ELI5''')
# Start sidebar
SCREAMING_SNAKE_CASE_ = '''<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'''
SCREAMING_SNAKE_CASE_ = '''
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class="img-container"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
''' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
SCREAMING_SNAKE_CASE_ = '''
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
'''
st.sidebar.markdown(description, unsafe_allow_html=True)
SCREAMING_SNAKE_CASE_ = [
'''Answer the question''',
'''View the retrieved document only''',
'''View the most similar ELI5 question and answer''',
'''Show me everything, please!''',
]
SCREAMING_SNAKE_CASE_ = st.sidebar.checkbox('''Demo options''')
if demo_options:
SCREAMING_SNAKE_CASE_ = st.sidebar.selectbox(
'''''',
action_list,
index=3,
)
SCREAMING_SNAKE_CASE_ = action_list.index(action_st)
SCREAMING_SNAKE_CASE_ = st.sidebar.selectbox(
'''''',
['''Show full text of passages''', '''Show passage section titles'''],
index=0,
)
SCREAMING_SNAKE_CASE_ = show_type == '''Show full text of passages'''
else:
SCREAMING_SNAKE_CASE_ = 3
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = st.sidebar.checkbox('''Retrieval options''')
if retrieval_options:
SCREAMING_SNAKE_CASE_ = '''
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
'''
st.sidebar.markdown(retriever_info)
SCREAMING_SNAKE_CASE_ = st.sidebar.selectbox('''Which Wikipedia format should the model use?''', ['''wiki40b''', '''none'''])
SCREAMING_SNAKE_CASE_ = st.sidebar.selectbox('''Which Wikipedia indexer should the model use?''', ['''dense''', '''sparse''', '''mixed'''])
else:
SCREAMING_SNAKE_CASE_ = '''wiki40b'''
SCREAMING_SNAKE_CASE_ = '''dense'''
SCREAMING_SNAKE_CASE_ = '''beam'''
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = 64
SCREAMING_SNAKE_CASE_ = 256
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = st.sidebar.checkbox('''Generation options''')
if generate_options:
SCREAMING_SNAKE_CASE_ = '''
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder\'s output probabilities.
'''
st.sidebar.markdown(generate_info)
SCREAMING_SNAKE_CASE_ = st.sidebar.selectbox('''Would you like to use beam search or sample an answer?''', ['''beam''', '''sampled'''])
SCREAMING_SNAKE_CASE_ = st.sidebar.slider(
'''Minimum generation length''', min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
SCREAMING_SNAKE_CASE_ = st.sidebar.slider(
'''Maximum generation length''', min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
SCREAMING_SNAKE_CASE_ = st.sidebar.slider('''Beam size''', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
SCREAMING_SNAKE_CASE_ = st.sidebar.slider(
'''Nucleus sampling p''', min_value=0.1, max_value=1.0, value=0.9_5, step=0.0_1, format=None, key=None
)
SCREAMING_SNAKE_CASE_ = st.sidebar.slider(
'''Temperature''', min_value=0.1, max_value=1.0, value=0.7, step=0.0_1, format=None, key=None
)
SCREAMING_SNAKE_CASE_ = None
# start main text
SCREAMING_SNAKE_CASE_ = [
'''<MY QUESTION>''',
'''How do people make chocolate?''',
'''Why do we get a fever when we are sick?''',
'''How can different animals perceive different colors?''',
'''What is natural language processing?''',
'''What\'s the best way to treat a sunburn?''',
'''What exactly are vitamins ?''',
'''How does nuclear energy provide electricity?''',
'''What\'s the difference between viruses and bacteria?''',
'''Why are flutes classified as woodwinds when most of them are made out of metal ?''',
'''Why do people like drinking coffee even though it tastes so bad?''',
'''What happens when wine ages? How does it make the wine taste better?''',
'''If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?''',
'''How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?''',
'''How does New Zealand have so many large bird predators?''',
]
SCREAMING_SNAKE_CASE_ = st.selectbox(
'''What would you like to ask? ---- select <MY QUESTION> to enter a new query''',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
SCREAMING_SNAKE_CASE_ = st.text_input('''Enter your question here:''', '''''')
else:
SCREAMING_SNAKE_CASE_ = question_s
if st.button('''Show me!'''):
if action in [0, 1, 3]:
if index_type == "mixed":
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = make_support(question, source=wiki_source, method='''dense''', n_results=10)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = make_support(question, source=wiki_source, method='''sparse''', n_results=10)
SCREAMING_SNAKE_CASE_ = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
SCREAMING_SNAKE_CASE_ = support_list[:10]
SCREAMING_SNAKE_CASE_ = '''<P> ''' + ''' <P> '''.join([res[-1] for res in support_list])
else:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == '''sampled'''),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('''### The model generated answer is:''')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('''--- \n ### The model is drawing information from the following Wikipedia passages:''')
for i, res in enumerate(support_list):
SCREAMING_SNAKE_CASE_ = '''https://en.wikipedia.org/wiki/{}'''.format(res[0].replace(''' ''', '''_'''))
SCREAMING_SNAKE_CASE_ = res[1].strip()
if sec_titles == "":
SCREAMING_SNAKE_CASE_ = '''[{}]({})'''.format(res[0], wiki_url)
else:
SCREAMING_SNAKE_CASE_ = sec_titles.split(''' & ''')
SCREAMING_SNAKE_CASE_ = ''' & '''.join(
['''[{}]({}#{})'''.format(sec.strip(), wiki_url, sec.strip().replace(''' ''', '''_''')) for sec in sec_list]
)
st.markdown(
'''{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'''.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'''> <span style="font-family:arial; font-size:10pt;">''' + res[-1] + '''</span>''', unsafe_allow_html=True
)
if action in [2, 3]:
SCREAMING_SNAKE_CASE_ = find_nearest_training(question)
SCREAMING_SNAKE_CASE_ = nn_train_list[0]
st.markdown(
'''--- \n ### The most similar question in the ELI5 training set was: \n\n {}'''.format(train_exple['''title'''])
)
SCREAMING_SNAKE_CASE_ = [
'''{}. {}'''.format(i + 1, ''' \n'''.join([line.strip() for line in ans.split('''\n''') if line.strip() != '''''']))
for i, (ans, sc) in enumerate(zip(train_exple['''answers''']['''text'''], train_exple['''answers''']['''score''']))
if i == 0 or sc > 2
]
st.markdown('''##### Its answers were: \n\n {}'''.format('''\n'''.join(answers_st)))
SCREAMING_SNAKE_CASE_ = '''
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
'''
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
}
SCREAMING_SNAKE_CASE_ = {
'''vocab_file''': {'''ctrl''': '''https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'''},
'''merges_file''': {'''ctrl''': '''https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'''},
}
SCREAMING_SNAKE_CASE_ = {
'''ctrl''': 256,
}
SCREAMING_SNAKE_CASE_ = {
'''Pregnancy''': 168_629,
'''Christianity''': 7_675,
'''Explain''': 106_423,
'''Fitness''': 63_440,
'''Saving''': 63_163,
'''Ask''': 27_171,
'''Ass''': 95_985,
'''Joke''': 163_509,
'''Questions''': 45_622,
'''Thoughts''': 49_605,
'''Retail''': 52_342,
'''Feminism''': 164_338,
'''Writing''': 11_992,
'''Atheism''': 192_263,
'''Netflix''': 48_616,
'''Computing''': 39_639,
'''Opinion''': 43_213,
'''Alone''': 44_967,
'''Funny''': 58_917,
'''Gaming''': 40_358,
'''Human''': 4_088,
'''India''': 1_331,
'''Joker''': 77_138,
'''Diet''': 36_206,
'''Legal''': 11_859,
'''Norman''': 4_939,
'''Tip''': 72_689,
'''Weight''': 52_343,
'''Movies''': 46_273,
'''Running''': 23_425,
'''Science''': 2_090,
'''Horror''': 37_793,
'''Confession''': 60_572,
'''Finance''': 12_250,
'''Politics''': 16_360,
'''Scary''': 191_985,
'''Support''': 12_654,
'''Technologies''': 32_516,
'''Teenage''': 66_160,
'''Event''': 32_769,
'''Learned''': 67_460,
'''Notion''': 182_770,
'''Wikipedia''': 37_583,
'''Books''': 6_665,
'''Extract''': 76_050,
'''Confessions''': 102_701,
'''Conspiracy''': 75_932,
'''Links''': 63_674,
'''Narcissus''': 150_425,
'''Relationship''': 54_766,
'''Relationships''': 134_796,
'''Reviews''': 41_671,
'''News''': 4_256,
'''Translation''': 26_820,
'''multilingual''': 128_406,
}
def lowercase (_lowerCAmelCase ):
__lowerCAmelCase = set()
__lowerCAmelCase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__lowerCAmelCase = char
__lowerCAmelCase = set(_lowerCAmelCase )
return pairs
class lowerCAmelCase_ ( A__ ):
'''simple docstring'''
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case = CONTROL_CODES
def __init__( self , snake_case_ , snake_case_ , snake_case_="<unk>" , **snake_case_ ) -> Optional[int]:
super().__init__(unk_token=snake_case_ , **snake_case_ )
with open(snake_case_ , encoding="""utf-8""" ) as vocab_handle:
__lowerCAmelCase = json.load(snake_case_ )
__lowerCAmelCase = {v: k for k, v in self.encoder.items()}
with open(snake_case_ , encoding="""utf-8""" ) as merges_handle:
__lowerCAmelCase = merges_handle.read().split("""\n""" )[1:-1]
__lowerCAmelCase = [tuple(merge.split() ) for merge in merges]
__lowerCAmelCase = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )
__lowerCAmelCase = {}
@property
def A__ ( self ) -> List[Any]:
return len(self.encoder )
def A__ ( self ) -> int:
return dict(self.encoder , **self.added_tokens_encoder )
def A__ ( self , snake_case_ ) -> Tuple:
if token in self.cache:
return self.cache[token]
__lowerCAmelCase = tuple(snake_case_ )
__lowerCAmelCase = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] )
__lowerCAmelCase = get_pairs(snake_case_ )
if not pairs:
return token
while True:
__lowerCAmelCase = min(snake_case_ , key=lambda snake_case_ : self.bpe_ranks.get(snake_case_ , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
__lowerCAmelCase , __lowerCAmelCase = bigram
__lowerCAmelCase = []
__lowerCAmelCase = 0
while i < len(snake_case_ ):
try:
__lowerCAmelCase = word.index(snake_case_ , snake_case_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__lowerCAmelCase = j
if word[i] == first and i < len(snake_case_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__lowerCAmelCase = tuple(snake_case_ )
__lowerCAmelCase = new_word
if len(snake_case_ ) == 1:
break
else:
__lowerCAmelCase = get_pairs(snake_case_ )
__lowerCAmelCase = """@@ """.join(snake_case_ )
__lowerCAmelCase = word[:-4]
__lowerCAmelCase = word
return word
def A__ ( self , snake_case_ ) -> str:
__lowerCAmelCase = []
__lowerCAmelCase = re.findall(r"""\S+\n?""" , snake_case_ )
for token in words:
split_tokens.extend(list(self.bpe(snake_case_ ).split(""" """ ) ) )
return split_tokens
def A__ ( self , snake_case_ ) -> Any:
return self.encoder.get(snake_case_ , self.encoder.get(self.unk_token ) )
def A__ ( self , snake_case_ ) -> List[str]:
return self.decoder.get(snake_case_ , self.unk_token )
def A__ ( self , snake_case_ ) -> List[Any]:
__lowerCAmelCase = """ """.join(snake_case_ ).replace("""@@ """ , """""" ).strip()
return out_string
def A__ ( self , snake_case_ , snake_case_ = None ) -> Tuple[str]:
if not os.path.isdir(snake_case_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__lowerCAmelCase = os.path.join(
snake_case_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
__lowerCAmelCase = os.path.join(
snake_case_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(snake_case_ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=snake_case_ , ensure_ascii=snake_case_ ) + """\n""" )
__lowerCAmelCase = 0
with open(snake_case_ , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda snake_case_ : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
""" Please check that the tokenizer is not corrupted!""" )
__lowerCAmelCase = token_index
writer.write(""" """.join(snake_case_ ) + """\n""" )
index += 1
return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
| 301
|
"""simple docstring"""
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
SCREAMING_SNAKE_CASE_ = getLogger(__name__)
SCREAMING_SNAKE_CASE_ = '''cuda''' if torch.cuda.is_available() else '''cpu'''
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 8 , _lowerCAmelCase = DEFAULT_DEVICE , _lowerCAmelCase=False , _lowerCAmelCase="summarization" , _lowerCAmelCase=None , **_lowerCAmelCase , ):
__lowerCAmelCase = Path(_lowerCAmelCase ).open("""w""" , encoding="""utf-8""" )
__lowerCAmelCase = str(_lowerCAmelCase )
__lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(_lowerCAmelCase ).to(_lowerCAmelCase )
if fpaa:
__lowerCAmelCase = model.half()
__lowerCAmelCase = AutoTokenizer.from_pretrained(_lowerCAmelCase )
logger.info(f"""Inferred tokenizer type: {tokenizer.__class__}""" ) # if this is wrong, check config.model_type.
__lowerCAmelCase = time.time()
# update config with task specific params
use_task_specific_params(_lowerCAmelCase , _lowerCAmelCase )
if prefix is None:
__lowerCAmelCase = prefix or getattr(model.config , """prefix""" , """""" ) or """"""
for examples_chunk in tqdm(list(chunks(_lowerCAmelCase , _lowerCAmelCase ) ) ):
__lowerCAmelCase = [prefix + text for text in examples_chunk]
__lowerCAmelCase = tokenizer(_lowerCAmelCase , return_tensors="""pt""" , truncation=_lowerCAmelCase , padding="""longest""" ).to(_lowerCAmelCase )
__lowerCAmelCase = model.generate(
input_ids=batch.input_ids , attention_mask=batch.attention_mask , **_lowerCAmelCase , )
__lowerCAmelCase = tokenizer.batch_decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase , clean_up_tokenization_spaces=_lowerCAmelCase )
for hypothesis in dec:
fout.write(hypothesis + """\n""" )
fout.flush()
fout.close()
__lowerCAmelCase = int(time.time() - start_time ) # seconds
__lowerCAmelCase = len(_lowerCAmelCase )
return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )}
def lowercase ():
return datetime.datetime.now().strftime("""%Y-%m-%d %H:%M:%S""" )
def lowercase (_lowerCAmelCase=True ):
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("""model_name""" , type=_lowerCAmelCase , help="""like facebook/bart-large-cnn,t5-base, etc.""" )
parser.add_argument("""input_path""" , type=_lowerCAmelCase , help="""like cnn_dm/test.source""" )
parser.add_argument("""save_path""" , type=_lowerCAmelCase , help="""where to save summaries""" )
parser.add_argument("""--reference_path""" , type=_lowerCAmelCase , required=_lowerCAmelCase , help="""like cnn_dm/test.target""" )
parser.add_argument("""--score_path""" , type=_lowerCAmelCase , required=_lowerCAmelCase , default="""metrics.json""" , help="""where to save metrics""" )
parser.add_argument("""--device""" , type=_lowerCAmelCase , required=_lowerCAmelCase , default=_lowerCAmelCase , help="""cuda, cuda:1, cpu etc.""" )
parser.add_argument(
"""--prefix""" , type=_lowerCAmelCase , required=_lowerCAmelCase , default=_lowerCAmelCase , help="""will be added to the begininng of src examples""" )
parser.add_argument("""--task""" , type=_lowerCAmelCase , default="""summarization""" , help="""used for task_specific_params + metrics""" )
parser.add_argument("""--bs""" , type=_lowerCAmelCase , default=8 , required=_lowerCAmelCase , help="""batch size""" )
parser.add_argument(
"""--n_obs""" , type=_lowerCAmelCase , default=-1 , required=_lowerCAmelCase , help="""How many observations. Defaults to all.""" )
parser.add_argument("""--fp16""" , action="""store_true""" )
parser.add_argument("""--dump-args""" , action="""store_true""" , help="""print the custom hparams with the results""" )
parser.add_argument(
"""--info""" , nargs="""?""" , type=_lowerCAmelCase , const=datetime_now() , help=(
"""use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."""
""" lang=en-ru. If no value is passed, the current datetime string will be used."""
) , )
# Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
__lowerCAmelCase , __lowerCAmelCase = parser.parse_known_args()
__lowerCAmelCase = parse_numeric_n_bool_cl_kwargs(_lowerCAmelCase )
if parsed_args and verbose:
print(f"""parsed the following generate kwargs: {parsed_args}""" )
__lowerCAmelCase = [""" """ + x.rstrip() if """t5""" in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
if args.n_obs > 0:
__lowerCAmelCase = examples[: args.n_obs]
Path(args.save_path ).parent.mkdir(exist_ok=_lowerCAmelCase )
if args.reference_path is None and Path(args.score_path ).exists():
warnings.warn(f"""score_path {args.score_path} will be overwritten unless you type ctrl-c.""" )
if args.device == "cpu" and args.fpaa:
# this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
raise ValueError("""Can't mix --fp16 and --device cpu""" )
__lowerCAmelCase = generate_summaries_or_translations(
_lowerCAmelCase , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fpaa , task=args.task , prefix=args.prefix , **_lowerCAmelCase , )
if args.reference_path is None:
return {}
# Compute scores
__lowerCAmelCase = calculate_bleu if """translation""" in args.task else calculate_rouge
__lowerCAmelCase = [x.rstrip() for x in open(args.save_path ).readlines()]
__lowerCAmelCase = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(_lowerCAmelCase )]
__lowerCAmelCase = score_fn(_lowerCAmelCase , _lowerCAmelCase )
scores.update(_lowerCAmelCase )
if args.dump_args:
scores.update(_lowerCAmelCase )
if args.info:
__lowerCAmelCase = args.info
if verbose:
print(_lowerCAmelCase )
if args.score_path is not None:
json.dump(_lowerCAmelCase , open(args.score_path , """w""" ) )
return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
| 301
| 1
|
"""simple docstring"""
from __future__ import annotations
def lowercase (_lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = []
create_all_state(1 , _lowerCAmelCase , _lowerCAmelCase , [] , _lowerCAmelCase )
return result
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ):
if level == 0:
total_list.append(current_list[:] )
return
for i in range(_lowerCAmelCase , total_number - level + 2 ):
current_list.append(_lowerCAmelCase )
create_all_state(i + 1 , _lowerCAmelCase , level - 1 , _lowerCAmelCase , _lowerCAmelCase )
current_list.pop()
def lowercase (_lowerCAmelCase ):
for i in total_list:
print(*_lowerCAmelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = generate_all_combinations(n, k)
print_all_state(total_list)
| 301
|
"""simple docstring"""
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def lowercase (_lowerCAmelCase , _lowerCAmelCase="shi-labs/oneformer_demo" ):
with open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) as f:
__lowerCAmelCase = json.load(_lowerCAmelCase )
__lowerCAmelCase = {}
__lowerCAmelCase = []
__lowerCAmelCase = []
for key, info in class_info.items():
__lowerCAmelCase = info["""name"""]
class_names.append(info["""name"""] )
if info["isthing"]:
thing_ids.append(int(_lowerCAmelCase ) )
__lowerCAmelCase = thing_ids
__lowerCAmelCase = class_names
return metadata
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_=7 , snake_case_=3 , snake_case_=30 , snake_case_=400 , snake_case_=None , snake_case_=True , snake_case_=True , snake_case_=[0.5, 0.5, 0.5] , snake_case_=[0.5, 0.5, 0.5] , snake_case_=10 , snake_case_=False , snake_case_=255 , snake_case_="shi-labs/oneformer_demo" , snake_case_="ade20k_panoptic.json" , snake_case_=10 , ) -> Union[str, Any]:
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = min_resolution
__lowerCAmelCase = max_resolution
__lowerCAmelCase = do_resize
__lowerCAmelCase = {"""shortest_edge""": 32, """longest_edge""": 1_333} if size is None else size
__lowerCAmelCase = do_normalize
__lowerCAmelCase = image_mean
__lowerCAmelCase = image_std
__lowerCAmelCase = class_info_file
__lowerCAmelCase = prepare_metadata(snake_case_ , snake_case_ )
__lowerCAmelCase = num_text
__lowerCAmelCase = repo_path
# for the post_process_functions
__lowerCAmelCase = 2
__lowerCAmelCase = 10
__lowerCAmelCase = 10
__lowerCAmelCase = 3
__lowerCAmelCase = 4
__lowerCAmelCase = num_labels
__lowerCAmelCase = do_reduce_labels
__lowerCAmelCase = ignore_index
def A__ ( self ) -> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def A__ ( self , snake_case_ , snake_case_=False ) -> Dict:
if not batched:
__lowerCAmelCase = image_inputs[0]
if isinstance(snake_case_ , Image.Image ):
__lowerCAmelCase , __lowerCAmelCase = image.size
else:
__lowerCAmelCase , __lowerCAmelCase = image.shape[1], image.shape[2]
if w < h:
__lowerCAmelCase = int(self.size["""shortest_edge"""] * h / w )
__lowerCAmelCase = self.size["""shortest_edge"""]
elif w > h:
__lowerCAmelCase = self.size["""shortest_edge"""]
__lowerCAmelCase = int(self.size["""shortest_edge"""] * w / h )
else:
__lowerCAmelCase = self.size["""shortest_edge"""]
__lowerCAmelCase = self.size["""shortest_edge"""]
else:
__lowerCAmelCase = []
for image in image_inputs:
__lowerCAmelCase , __lowerCAmelCase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__lowerCAmelCase = max(snake_case_ , key=lambda snake_case_ : item[0] )[0]
__lowerCAmelCase = max(snake_case_ , key=lambda snake_case_ : item[1] )[1]
return expected_height, expected_width
def A__ ( self ) -> Tuple:
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class lowerCAmelCase_ ( A__ , unittest.TestCase ):
'''simple docstring'''
_snake_case = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
_snake_case = image_processing_class
def A__ ( self ) -> str:
__lowerCAmelCase = OneFormerImageProcessorTester(self )
@property
def A__ ( self ) -> Dict:
return self.image_processing_tester.prepare_image_processor_dict()
def A__ ( self ) -> Union[str, Any]:
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case_ , """image_mean""" ) )
self.assertTrue(hasattr(snake_case_ , """image_std""" ) )
self.assertTrue(hasattr(snake_case_ , """do_normalize""" ) )
self.assertTrue(hasattr(snake_case_ , """do_resize""" ) )
self.assertTrue(hasattr(snake_case_ , """size""" ) )
self.assertTrue(hasattr(snake_case_ , """ignore_index""" ) )
self.assertTrue(hasattr(snake_case_ , """class_info_file""" ) )
self.assertTrue(hasattr(snake_case_ , """num_text""" ) )
self.assertTrue(hasattr(snake_case_ , """repo_path""" ) )
self.assertTrue(hasattr(snake_case_ , """metadata""" ) )
self.assertTrue(hasattr(snake_case_ , """do_reduce_labels""" ) )
def A__ ( self ) -> List[str]:
pass
def A__ ( self ) -> Union[str, Any]:
# Initialize image_processor
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCAmelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , Image.Image )
# Test not batched input
__lowerCAmelCase = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
__lowerCAmelCase , __lowerCAmelCase = self.image_processing_tester.get_expected_values(snake_case_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCAmelCase , __lowerCAmelCase = self.image_processing_tester.get_expected_values(snake_case_ , batched=snake_case_ )
__lowerCAmelCase = image_processor(
snake_case_ , ["""semantic"""] * len(snake_case_ ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A__ ( self ) -> List[str]:
# Initialize image_processor
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowerCAmelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case_ , numpify=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , np.ndarray )
# Test not batched input
__lowerCAmelCase = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
__lowerCAmelCase , __lowerCAmelCase = self.image_processing_tester.get_expected_values(snake_case_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCAmelCase , __lowerCAmelCase = self.image_processing_tester.get_expected_values(snake_case_ , batched=snake_case_ )
__lowerCAmelCase = image_processor(
snake_case_ , ["""semantic"""] * len(snake_case_ ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A__ ( self ) -> Tuple:
# Initialize image_processor
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowerCAmelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case_ , torchify=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , torch.Tensor )
# Test not batched input
__lowerCAmelCase = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
__lowerCAmelCase , __lowerCAmelCase = self.image_processing_tester.get_expected_values(snake_case_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCAmelCase , __lowerCAmelCase = self.image_processing_tester.get_expected_values(snake_case_ , batched=snake_case_ )
__lowerCAmelCase = image_processor(
snake_case_ , ["""semantic"""] * len(snake_case_ ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A__ ( self , snake_case_=False , snake_case_=False , snake_case_="np" ) -> Optional[Any]:
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
__lowerCAmelCase = self.image_processing_tester.num_labels
__lowerCAmelCase = None
__lowerCAmelCase = None
__lowerCAmelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case_ )
if with_segmentation_maps:
__lowerCAmelCase = num_labels
if is_instance_map:
__lowerCAmelCase = list(range(snake_case_ ) ) * 2
__lowerCAmelCase = dict(enumerate(snake_case_ ) )
__lowerCAmelCase = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
__lowerCAmelCase = [Image.fromarray(snake_case_ ) for annotation in annotations]
__lowerCAmelCase = image_processor(
snake_case_ , ["""semantic"""] * len(snake_case_ ) , snake_case_ , return_tensors="""pt""" , instance_id_to_semantic_id=snake_case_ , pad_and_return_pixel_mask=snake_case_ , )
return inputs
def A__ ( self ) -> List[str]:
pass
def A__ ( self ) -> Optional[Any]:
def common(snake_case_=False , snake_case_=None ):
__lowerCAmelCase = self.comm_get_image_processor_inputs(
with_segmentation_maps=snake_case_ , is_instance_map=snake_case_ , segmentation_type=snake_case_ )
__lowerCAmelCase = inputs["""mask_labels"""]
__lowerCAmelCase = inputs["""class_labels"""]
__lowerCAmelCase = inputs["""pixel_values"""]
__lowerCAmelCase = inputs["""text_inputs"""]
# check the batch_size
for mask_label, class_label, text_input in zip(snake_case_ , snake_case_ , snake_case_ ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
                # this ensures that padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(snake_case_ ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=snake_case_ )
common(is_instance_map=snake_case_ , segmentation_type="""pil""" )
common(is_instance_map=snake_case_ , segmentation_type="""pil""" )
def A__ ( self ) -> Optional[int]:
__lowerCAmelCase = np.zeros((20, 50) )
__lowerCAmelCase = 1
__lowerCAmelCase = 1
__lowerCAmelCase = 1
__lowerCAmelCase = binary_mask_to_rle(snake_case_ )
self.assertEqual(len(snake_case_ ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
def A__ ( self ) -> Optional[Any]:
__lowerCAmelCase = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
__lowerCAmelCase = self.image_processing_tester.get_fake_oneformer_outputs()
        __lowerCAmelCase = feature_extractor.post_process_semantic_segmentation(snake_case_ )
self.assertEqual(len(snake_case_ ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
__lowerCAmelCase = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
        __lowerCAmelCase = feature_extractor.post_process_semantic_segmentation(snake_case_ , target_sizes=snake_case_ )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def A__ ( self ) -> Union[str, Any]:
__lowerCAmelCase = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
__lowerCAmelCase = self.image_processing_tester.get_fake_oneformer_outputs()
__lowerCAmelCase = image_processor.post_process_instance_segmentation(snake_case_ , threshold=0 )
self.assertTrue(len(snake_case_ ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("""segmentation""" in el )
self.assertTrue("""segments_info""" in el )
self.assertEqual(type(el["""segments_info"""] ) , snake_case_ )
self.assertEqual(
el["""segmentation"""].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def A__ ( self ) -> Union[str, Any]:
__lowerCAmelCase = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
__lowerCAmelCase = self.image_processing_tester.get_fake_oneformer_outputs()
__lowerCAmelCase = image_processor.post_process_panoptic_segmentation(snake_case_ , threshold=0 )
self.assertTrue(len(snake_case_ ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("""segmentation""" in el )
self.assertTrue("""segments_info""" in el )
self.assertEqual(type(el["""segments_info"""] ) , snake_case_ )
self.assertEqual(
el["""segmentation"""].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
| 301
| 1
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def lowercase (_lowerCAmelCase ):
__lowerCAmelCase = [tensor.shape for tensor in tensor_list]
return all(shape == shapes[0] for shape in shapes[1:] )
class lowerCAmelCase_ ( A__ , A__ , A__ , unittest.TestCase ):
'''simple docstring'''
_snake_case = StableDiffusionLatentUpscalePipeline
_snake_case = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'''height''',
'''width''',
'''cross_attention_kwargs''',
'''negative_prompt_embeds''',
'''prompt_embeds''',
}
_snake_case = PipelineTesterMixin.required_optional_params - {'''num_images_per_prompt'''}
_snake_case = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
_snake_case = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_snake_case = frozenset([] )
_snake_case = True
@property
def A__ ( self ) -> Any:
__lowerCAmelCase = 1
__lowerCAmelCase = 4
__lowerCAmelCase = (16, 16)
__lowerCAmelCase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(snake_case_ )
return image
def A__ ( self ) -> Optional[int]:
torch.manual_seed(0 )
__lowerCAmelCase = UNetaDConditionModel(
act_fn="""gelu""" , attention_head_dim=8 , norm_num_groups=snake_case_ , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=160 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=(
"""KDownBlock2D""",
"""KCrossAttnDownBlock2D""",
"""KCrossAttnDownBlock2D""",
"""KCrossAttnDownBlock2D""",
) , in_channels=8 , mid_block_type=snake_case_ , only_cross_attention=snake_case_ , out_channels=5 , resnet_time_scale_shift="""scale_shift""" , time_embedding_type="""fourier""" , timestep_post_act="""gelu""" , up_block_types=("""KCrossAttnUpBlock2D""", """KCrossAttnUpBlock2D""", """KCrossAttnUpBlock2D""", """KUpBlock2D""") , )
__lowerCAmelCase = AutoencoderKL(
block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[
"""DownEncoderBlock2D""",
"""DownEncoderBlock2D""",
"""DownEncoderBlock2D""",
"""DownEncoderBlock2D""",
] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D""", """UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
__lowerCAmelCase = EulerDiscreteScheduler(prediction_type="""sample""" )
__lowerCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""quick_gelu""" , projection_dim=512 , )
__lowerCAmelCase = CLIPTextModel(snake_case_ )
__lowerCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__lowerCAmelCase = {
"""unet""": model.eval(),
"""vae""": vae.eval(),
"""scheduler""": scheduler,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
}
return components
def A__ ( self , snake_case_ , snake_case_=0 ) -> Optional[Any]:
if str(snake_case_ ).startswith("""mps""" ):
__lowerCAmelCase = torch.manual_seed(snake_case_ )
else:
__lowerCAmelCase = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ )
__lowerCAmelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": self.dummy_image.cpu(),
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def A__ ( self ) -> Optional[int]:
__lowerCAmelCase = """cpu"""
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = self.pipeline_class(**snake_case_ )
pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
__lowerCAmelCase = self.get_dummy_inputs(snake_case_ )
__lowerCAmelCase = pipe(**snake_case_ ).images
__lowerCAmelCase = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 256, 256, 3) )
__lowerCAmelCase = np.array(
[0.47_222_412, 0.41_921_633, 0.44_717_434, 0.46_874_192, 0.42_588_258, 0.46_150_726, 0.4_677_534, 0.45_583_832, 0.48_579_055] )
__lowerCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(snake_case_ , 1e-3 )
def A__ ( self ) -> List[str]:
super().test_attention_slicing_forward_pass(expected_max_diff=7e-3 )
def A__ ( self ) -> Optional[Any]:
super().test_cpu_offload_forward_pass(expected_max_diff=3e-3 )
def A__ ( self ) -> str:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def A__ ( self ) -> List[str]:
super().test_inference_batch_single_identical(expected_max_diff=7e-3 )
def A__ ( self ) -> int:
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3 )
def A__ ( self ) -> Any:
super().test_save_load_local(expected_max_difference=3e-3 )
def A__ ( self ) -> int:
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def A__ ( self ) -> Dict:
__lowerCAmelCase = [
"""DDIMScheduler""",
"""DDPMScheduler""",
"""PNDMScheduler""",
"""HeunDiscreteScheduler""",
"""EulerAncestralDiscreteScheduler""",
"""KDPM2DiscreteScheduler""",
"""KDPM2AncestralDiscreteScheduler""",
"""DPMSolverSDEScheduler""",
]
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = self.pipeline_class(**snake_case_ )
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=snake_case_ )
pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
__lowerCAmelCase = self.get_dummy_inputs(snake_case_ )
__lowerCAmelCase = 2
__lowerCAmelCase = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
                # these schedulers are not supported for this pipeline, so skip them
continue
__lowerCAmelCase = getattr(snake_case_ , scheduler_enum.name )
__lowerCAmelCase = scheduler_cls.from_config(pipe.scheduler.config )
__lowerCAmelCase = pipe(**snake_case_ )[0]
outputs.append(snake_case_ )
assert check_same_shape(snake_case_ )
@require_torch_gpu
@slow
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self ) -> List[str]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ) -> str:
__lowerCAmelCase = torch.manual_seed(33 )
__lowerCAmelCase = StableDiffusionPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""" , torch_dtype=torch.floataa )
pipe.to("""cuda""" )
__lowerCAmelCase = StableDiffusionLatentUpscalePipeline.from_pretrained(
"""stabilityai/sd-x2-latent-upscaler""" , torch_dtype=torch.floataa )
upscaler.to("""cuda""" )
__lowerCAmelCase = """a photo of an astronaut high resolution, unreal engine, ultra realistic"""
__lowerCAmelCase = pipe(snake_case_ , generator=snake_case_ , output_type="""latent""" ).images
__lowerCAmelCase = upscaler(
prompt=snake_case_ , image=snake_case_ , num_inference_steps=20 , guidance_scale=0 , generator=snake_case_ , output_type="""np""" , ).images[0]
__lowerCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy""" )
assert np.abs((expected_image - image).mean() ) < 5e-2
def A__ ( self ) -> Union[str, Any]:
__lowerCAmelCase = torch.manual_seed(33 )
__lowerCAmelCase = StableDiffusionLatentUpscalePipeline.from_pretrained(
"""stabilityai/sd-x2-latent-upscaler""" , torch_dtype=torch.floataa )
upscaler.to("""cuda""" )
__lowerCAmelCase = """the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"""
__lowerCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png""" )
__lowerCAmelCase = upscaler(
prompt=snake_case_ , image=snake_case_ , num_inference_steps=20 , guidance_scale=0 , generator=snake_case_ , output_type="""np""" , ).images[0]
__lowerCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy""" )
assert np.abs((expected_image - image).max() ) < 5e-2
| 301
|
"""simple docstring"""
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Base model mapping
('''albert''', '''FlaxAlbertModel'''),
('''bart''', '''FlaxBartModel'''),
('''beit''', '''FlaxBeitModel'''),
('''bert''', '''FlaxBertModel'''),
('''big_bird''', '''FlaxBigBirdModel'''),
('''blenderbot''', '''FlaxBlenderbotModel'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallModel'''),
('''clip''', '''FlaxCLIPModel'''),
('''distilbert''', '''FlaxDistilBertModel'''),
('''electra''', '''FlaxElectraModel'''),
('''gpt-sw3''', '''FlaxGPT2Model'''),
('''gpt2''', '''FlaxGPT2Model'''),
('''gpt_neo''', '''FlaxGPTNeoModel'''),
('''gptj''', '''FlaxGPTJModel'''),
('''longt5''', '''FlaxLongT5Model'''),
('''marian''', '''FlaxMarianModel'''),
('''mbart''', '''FlaxMBartModel'''),
('''mt5''', '''FlaxMT5Model'''),
('''opt''', '''FlaxOPTModel'''),
('''pegasus''', '''FlaxPegasusModel'''),
('''regnet''', '''FlaxRegNetModel'''),
('''resnet''', '''FlaxResNetModel'''),
('''roberta''', '''FlaxRobertaModel'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormModel'''),
('''roformer''', '''FlaxRoFormerModel'''),
('''t5''', '''FlaxT5Model'''),
('''vision-text-dual-encoder''', '''FlaxVisionTextDualEncoderModel'''),
('''vit''', '''FlaxViTModel'''),
('''wav2vec2''', '''FlaxWav2Vec2Model'''),
('''whisper''', '''FlaxWhisperModel'''),
('''xglm''', '''FlaxXGLMModel'''),
('''xlm-roberta''', '''FlaxXLMRobertaModel'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for pre-training mapping
('''albert''', '''FlaxAlbertForPreTraining'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForPreTraining'''),
('''big_bird''', '''FlaxBigBirdForPreTraining'''),
('''electra''', '''FlaxElectraForPreTraining'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
('''wav2vec2''', '''FlaxWav2Vec2ForPreTraining'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Masked LM mapping
('''albert''', '''FlaxAlbertForMaskedLM'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForMaskedLM'''),
('''big_bird''', '''FlaxBigBirdForMaskedLM'''),
('''distilbert''', '''FlaxDistilBertForMaskedLM'''),
('''electra''', '''FlaxElectraForMaskedLM'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''blenderbot''', '''FlaxBlenderbotForConditionalGeneration'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallForConditionalGeneration'''),
('''encoder-decoder''', '''FlaxEncoderDecoderModel'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''marian''', '''FlaxMarianMTModel'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''pegasus''', '''FlaxPegasusForConditionalGeneration'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
        # Model for Image classification mapping
('''beit''', '''FlaxBeitForImageClassification'''),
('''regnet''', '''FlaxRegNetForImageClassification'''),
('''resnet''', '''FlaxResNetForImageClassification'''),
('''vit''', '''FlaxViTForImageClassification'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
('''vision-encoder-decoder''', '''FlaxVisionEncoderDecoderModel'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Causal LM mapping
('''bart''', '''FlaxBartForCausalLM'''),
('''bert''', '''FlaxBertForCausalLM'''),
('''big_bird''', '''FlaxBigBirdForCausalLM'''),
('''electra''', '''FlaxElectraForCausalLM'''),
('''gpt-sw3''', '''FlaxGPT2LMHeadModel'''),
('''gpt2''', '''FlaxGPT2LMHeadModel'''),
('''gpt_neo''', '''FlaxGPTNeoForCausalLM'''),
('''gptj''', '''FlaxGPTJForCausalLM'''),
('''opt''', '''FlaxOPTForCausalLM'''),
('''roberta''', '''FlaxRobertaForCausalLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForCausalLM'''),
('''xglm''', '''FlaxXGLMForCausalLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForCausalLM'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Sequence Classification mapping
('''albert''', '''FlaxAlbertForSequenceClassification'''),
('''bart''', '''FlaxBartForSequenceClassification'''),
('''bert''', '''FlaxBertForSequenceClassification'''),
('''big_bird''', '''FlaxBigBirdForSequenceClassification'''),
('''distilbert''', '''FlaxDistilBertForSequenceClassification'''),
('''electra''', '''FlaxElectraForSequenceClassification'''),
('''mbart''', '''FlaxMBartForSequenceClassification'''),
('''roberta''', '''FlaxRobertaForSequenceClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForSequenceClassification'''),
('''roformer''', '''FlaxRoFormerForSequenceClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForSequenceClassification'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Question Answering mapping
('''albert''', '''FlaxAlbertForQuestionAnswering'''),
('''bart''', '''FlaxBartForQuestionAnswering'''),
('''bert''', '''FlaxBertForQuestionAnswering'''),
('''big_bird''', '''FlaxBigBirdForQuestionAnswering'''),
('''distilbert''', '''FlaxDistilBertForQuestionAnswering'''),
('''electra''', '''FlaxElectraForQuestionAnswering'''),
('''mbart''', '''FlaxMBartForQuestionAnswering'''),
('''roberta''', '''FlaxRobertaForQuestionAnswering'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForQuestionAnswering'''),
('''roformer''', '''FlaxRoFormerForQuestionAnswering'''),
('''xlm-roberta''', '''FlaxXLMRobertaForQuestionAnswering'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Token Classification mapping
('''albert''', '''FlaxAlbertForTokenClassification'''),
('''bert''', '''FlaxBertForTokenClassification'''),
('''big_bird''', '''FlaxBigBirdForTokenClassification'''),
('''distilbert''', '''FlaxDistilBertForTokenClassification'''),
('''electra''', '''FlaxElectraForTokenClassification'''),
('''roberta''', '''FlaxRobertaForTokenClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForTokenClassification'''),
('''roformer''', '''FlaxRoFormerForTokenClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForTokenClassification'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Multiple Choice mapping
('''albert''', '''FlaxAlbertForMultipleChoice'''),
('''bert''', '''FlaxBertForMultipleChoice'''),
('''big_bird''', '''FlaxBigBirdForMultipleChoice'''),
('''distilbert''', '''FlaxDistilBertForMultipleChoice'''),
('''electra''', '''FlaxElectraForMultipleChoice'''),
('''roberta''', '''FlaxRobertaForMultipleChoice'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMultipleChoice'''),
('''roformer''', '''FlaxRoFormerForMultipleChoice'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMultipleChoice'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
('''bert''', '''FlaxBertForNextSentencePrediction'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
('''speech-encoder-decoder''', '''FlaxSpeechEncoderDecoderModel'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
('''whisper''', '''FlaxWhisperForAudioClassification'''),
]
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModel)
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_PRETRAINING_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForPreTraining, head_doc='''pretraining''')
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForCausalLM, head_doc='''causal language modeling''')
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_MASKED_LM_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='''masked language modeling''')
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc='''sequence-to-sequence language modeling''', checkpoint_for_example='''t5-base'''
)
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc='''sequence classification'''
)
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='''question answering''')
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc='''token classification'''
)
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='''multiple choice''')
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc='''next sentence prediction'''
)
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(
FlaxAutoModelForImageClassification, head_doc='''image classification'''
)
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='''vision-to-text modeling''')
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc='''sequence-to-sequence speech-to-text modeling'''
)
| 301
| 1
|
"""simple docstring"""
import math
from collections.abc import Callable
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = xa
__lowerCAmelCase = xa
while True:
if x_n == x_na or function(_lowerCAmelCase ) == function(_lowerCAmelCase ):
raise ZeroDivisionError("""float division by zero, could not find root""" )
__lowerCAmelCase = x_na - (
function(_lowerCAmelCase ) / ((function(_lowerCAmelCase ) - function(_lowerCAmelCase )) / (x_na - x_n))
)
if abs(x_na - x_na ) < 10**-5:
return x_na
__lowerCAmelCase = x_na
__lowerCAmelCase = x_na
def lowercase (_lowerCAmelCase ):
return math.pow(_lowerCAmelCase , 3 ) - (2 * x) - 5
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
| 301
|
"""simple docstring"""
from __future__ import annotations
def lowercase (_lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = []
create_all_state(1 , _lowerCAmelCase , _lowerCAmelCase , [] , _lowerCAmelCase )
return result
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ):
if level == 0:
total_list.append(current_list[:] )
return
for i in range(_lowerCAmelCase , total_number - level + 2 ):
current_list.append(_lowerCAmelCase )
create_all_state(i + 1 , _lowerCAmelCase , level - 1 , _lowerCAmelCase , _lowerCAmelCase )
current_list.pop()
def lowercase (_lowerCAmelCase ):
for i in total_list:
print(*_lowerCAmelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = generate_all_combinations(n, k)
print_all_state(total_list)
| 301
| 1
|
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def lowercase (_lowerCAmelCase , _lowerCAmelCase=10 ):
__lowerCAmelCase = []
for _ in range(_lowerCAmelCase ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def lowercase (_lowerCAmelCase , _lowerCAmelCase=10 ):
__lowerCAmelCase = []
for step in range(_lowerCAmelCase ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCAmelCase = os.path.join(_lowerCAmelCase , """schedule.bin""" )
torch.save(scheduler.state_dict() , _lowerCAmelCase )
__lowerCAmelCase = torch.load(_lowerCAmelCase )
scheduler.load_state_dict(_lowerCAmelCase )
return lrs
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self , snake_case_ , snake_case_ , snake_case_ ) -> int:
self.assertEqual(len(snake_case_ ) , len(snake_case_ ) )
for a, b in zip(snake_case_ , snake_case_ ):
self.assertAlmostEqual(snake_case_ , snake_case_ , delta=snake_case_ )
def A__ ( self ) -> Union[str, Any]:
__lowerCAmelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=snake_case_ )
__lowerCAmelCase = torch.tensor([0.4, 0.2, -0.5] )
__lowerCAmelCase = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
__lowerCAmelCase = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 )
for _ in range(100 ):
__lowerCAmelCase = criterion(snake_case_ , snake_case_ )
loss.backward()
optimizer.step()
            w.grad.detach_() # No zero_grad() function on simple tensors. We do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
def A__ ( self ) -> Optional[Any]:
__lowerCAmelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=snake_case_ )
__lowerCAmelCase = torch.tensor([0.4, 0.2, -0.5] )
__lowerCAmelCase = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
__lowerCAmelCase = Adafactor(
params=[w] , lr=1e-2 , eps=(1e-3_0, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=snake_case_ , weight_decay=0.0 , relative_step=snake_case_ , scale_parameter=snake_case_ , warmup_init=snake_case_ , )
for _ in range(1_000 ):
__lowerCAmelCase = criterion(snake_case_ , snake_case_ )
loss.backward()
optimizer.step()
            w.grad.detach_() # No zero_grad() function on simple tensors. We do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
_snake_case = nn.Linear(5_0 , 5_0 ) if is_torch_available() else None
_snake_case = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None
_snake_case = 1_0
def A__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=None ) -> str:
self.assertEqual(len(snake_case_ ) , len(snake_case_ ) )
for a, b in zip(snake_case_ , snake_case_ ):
self.assertAlmostEqual(snake_case_ , snake_case_ , delta=snake_case_ , msg=snake_case_ )
def A__ ( self ) -> str:
__lowerCAmelCase = {"""num_warmup_steps""": 2, """num_training_steps""": 10}
        # schedulers dict format
# function: (sched_args_dict, expected_learning_rates)
__lowerCAmelCase = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"""num_warmup_steps""": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, """num_cycles""": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, """power""": 2.0, """lr_end""": 1e-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"""num_warmup_steps""": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
for scheduler_func, data in scheds.items():
__lowerCAmelCase , __lowerCAmelCase = data
__lowerCAmelCase = scheduler_func(self.optimizer , **snake_case_ )
self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
__lowerCAmelCase = unwrap_schedule(snake_case_ , self.num_steps )
self.assertListAlmostEqual(
snake_case_ , snake_case_ , tol=1e-2 , msg=f"""failed for {scheduler_func} in normal scheduler""" , )
__lowerCAmelCase = scheduler_func(self.optimizer , **snake_case_ )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(snake_case_ ) # wrap to test picklability of the schedule
__lowerCAmelCase = unwrap_and_save_reload_schedule(snake_case_ , self.num_steps )
self.assertListEqual(snake_case_ , snake_case_ , msg=f"""failed for {scheduler_func} in save and reload""" )
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self , snake_case_ ) -> Dict:
__lowerCAmelCase = fn
def __call__( self , *snake_case_ , **snake_case_ ) -> Optional[Any]:
return self.fn(*snake_case_ , **snake_case_ )
@classmethod
def A__ ( self , snake_case_ ) -> Tuple:
__lowerCAmelCase = list(map(self , scheduler.lr_lambdas ) )
| 301
|
"""simple docstring"""
import os
from pathlib import Path
def lowercase ():
from torch.utils.cpp_extension import load
__lowerCAmelCase = Path(_lowerCAmelCase ).resolve().parent.parent.parent / """kernels""" / """deformable_detr"""
__lowerCAmelCase = [
root / filename
for filename in [
"""vision.cpp""",
os.path.join("""cpu""" , """ms_deform_attn_cpu.cpp""" ),
os.path.join("""cuda""" , """ms_deform_attn_cuda.cu""" ),
]
]
load(
"""MultiScaleDeformableAttention""" , _lowerCAmelCase , with_cuda=_lowerCAmelCase , extra_include_paths=[str(_lowerCAmelCase )] , extra_cflags=["""-DWITH_CUDA=1"""] , extra_cuda_cflags=[
"""-DCUDA_HAS_FP16=1""",
"""-D__CUDA_NO_HALF_OPERATORS__""",
"""-D__CUDA_NO_HALF_CONVERSIONS__""",
"""-D__CUDA_NO_HALF2_OPERATORS__""",
] , )
import MultiScaleDeformableAttention as MSDA
return MSDA
| 301
| 1
|
"""simple docstring"""
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'''google/umt5-small''': '''https://huggingface.co/google/umt5-small/resolve/main/config.json''',
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class lowerCAmelCase_ ( A__ ):
'''simple docstring'''
_snake_case = '''umt5'''
_snake_case = ['''past_key_values''']
def __init__( self , snake_case_=250_112 , snake_case_=512 , snake_case_=64 , snake_case_=1_024 , snake_case_=8 , snake_case_=None , snake_case_=6 , snake_case_=32 , snake_case_=128 , snake_case_=0.1 , snake_case_=1e-6 , snake_case_=1.0 , snake_case_="gated-gelu" , snake_case_=True , snake_case_=True , snake_case_="T5Tokenizer" , snake_case_=True , snake_case_=0 , snake_case_=1 , snake_case_=0 , **snake_case_ , ) -> Optional[int]:
super().__init__(
is_encoder_decoder=snake_case_ , tokenizer_class=snake_case_ , tie_word_embeddings=snake_case_ , pad_token_id=snake_case_ , eos_token_id=snake_case_ , decoder_start_token_id=snake_case_ , **snake_case_ , )
__lowerCAmelCase = vocab_size
__lowerCAmelCase = d_model
__lowerCAmelCase = d_kv
__lowerCAmelCase = d_ff
__lowerCAmelCase = num_layers
__lowerCAmelCase = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
__lowerCAmelCase = num_heads
__lowerCAmelCase = relative_attention_num_buckets
__lowerCAmelCase = relative_attention_max_distance
__lowerCAmelCase = dropout_rate
__lowerCAmelCase = layer_norm_epsilon
__lowerCAmelCase = initializer_factor
__lowerCAmelCase = feed_forward_proj
__lowerCAmelCase = use_cache
__lowerCAmelCase = self.feed_forward_proj.split("""-""" )
__lowerCAmelCase = act_info[-1]
__lowerCAmelCase = act_info[0] == """gated"""
if len(snake_case_ ) > 1 and act_info[0] != "gated" or len(snake_case_ ) > 2:
raise ValueError(
f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
"""Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. """
"""'gated-gelu' or 'relu'""" )
if feed_forward_proj == "gated-gelu":
__lowerCAmelCase = """gelu_new"""
@property
def A__ ( self ) -> Optional[Any]:
return self.d_model
@property
def A__ ( self ) -> str:
return self.num_heads
@property
def A__ ( self ) -> Tuple:
return self.num_layers
class lowerCAmelCase_ ( A__ ):
'''simple docstring'''
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def A__ ( self ) -> Mapping[str, Mapping[int, str]]:
__lowerCAmelCase = {
"""input_ids""": {0: """batch""", 1: """encoder_sequence"""},
"""attention_mask""": {0: """batch""", 1: """encoder_sequence"""},
}
if self.use_past:
__lowerCAmelCase = """past_encoder_sequence + sequence"""
__lowerCAmelCase = {0: """batch"""}
__lowerCAmelCase = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
__lowerCAmelCase = {0: """batch""", 1: """decoder_sequence"""}
__lowerCAmelCase = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(snake_case_ , direction="""inputs""" )
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def A__ ( self ) -> int:
return 13
@property
def A__ ( self ) -> float:
return 5e-4
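# Hedged standalone sketch (not part of the configuration class above): it restates the
# `feed_forward_proj` parsing performed in __init__, so the accepted strings are easy to see.
# The function name and return shape are illustrative assumptions.
def parse_feed_forward_proj(feed_forward_proj: str):
    act_info = feed_forward_proj.split("-")
    dense_act_fn = act_info[-1]
    is_gated_act = act_info[0] == "gated"
    if (len(act_info) > 1 and act_info[0] != "gated") or len(act_info) > 2:
        raise ValueError(f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function")
    if feed_forward_proj == "gated-gelu":
        dense_act_fn = "gelu_new"  # same remapping as in __init__ above
    return dense_act_fn, is_gated_act


# e.g. parse_feed_forward_proj("gated-gelu") -> ("gelu_new", True)
#      parse_feed_forward_proj("relu")       -> ("relu", False)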
| 301
|
"""simple docstring"""
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(arrival_time, burst_time, no_of_processes):
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time to the burst time of each process.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process = []
    completed = 0
    total_time = 0
    # While processes are not completed,
    # a process whose arrival time has passed
    # and that still has remaining execution time is put into ready_process.
    # The shortest process in ready_process, target_process, is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1
        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)
        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time


def calculate_turnaroundtime(burst_time, no_of_processes, waiting_time):
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
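# Hedged convenience sketch (not in the original module): bundles the two calls above so a
# caller gets mean waiting/turnaround times in one shot; the helper name is an assumption.
def average_times(arrival_time, burst_time):
    no_of_processes = len(burst_time)
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(burst_time, no_of_processes, waiting_time)
    return mean(waiting_time), mean(turn_around_time)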
if __name__ == "__main__":
print('''[TEST CASE 01]''')
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = [2, 5, 3, 7]
SCREAMING_SNAKE_CASE_ = [0, 0, 0, 0]
SCREAMING_SNAKE_CASE_ = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
SCREAMING_SNAKE_CASE_ = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print('''PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time''')
for i, process_id in enumerate(list(range(1, 5))):
print(
F"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"
F"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"
)
print(F"\nAverage waiting time = {mean(waiting_time):.5f}")
print(F"Average turnaround time = {mean(turn_around_time):.5f}")
| 301
| 1
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
SCREAMING_SNAKE_CASE_ = {
'''vocab_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-openqa''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt'''
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-openqa''': (
'''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-reader''': (
'''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-openqa''': (
'''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-reader''': (
'''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'''
),
},
}
SCREAMING_SNAKE_CASE_ = {
'''google/realm-cc-news-pretrained-embedder''': 512,
'''google/realm-cc-news-pretrained-encoder''': 512,
'''google/realm-cc-news-pretrained-scorer''': 512,
'''google/realm-cc-news-pretrained-openqa''': 512,
'''google/realm-orqa-nq-openqa''': 512,
'''google/realm-orqa-nq-reader''': 512,
'''google/realm-orqa-wq-openqa''': 512,
'''google/realm-orqa-wq-reader''': 512,
}
SCREAMING_SNAKE_CASE_ = {
'''google/realm-cc-news-pretrained-embedder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-encoder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-scorer''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-reader''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-reader''': {'''do_lower_case''': True},
}
class lowerCAmelCase_ ( A__ ):
'''simple docstring'''
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_INIT_CONFIGURATION
_snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case = RealmTokenizer
def __init__( self , snake_case_=None , snake_case_=None , snake_case_=True , snake_case_="[UNK]" , snake_case_="[SEP]" , snake_case_="[PAD]" , snake_case_="[CLS]" , snake_case_="[MASK]" , snake_case_=True , snake_case_=None , **snake_case_ , ) -> Optional[int]:
super().__init__(
snake_case_ , tokenizer_file=snake_case_ , do_lower_case=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , pad_token=snake_case_ , cls_token=snake_case_ , mask_token=snake_case_ , tokenize_chinese_chars=snake_case_ , strip_accents=snake_case_ , **snake_case_ , )
__lowerCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , snake_case_ ) != do_lower_case
or normalizer_state.get("""strip_accents""" , snake_case_ ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , snake_case_ ) != tokenize_chinese_chars
):
__lowerCAmelCase = getattr(snake_case_ , normalizer_state.pop("""type""" ) )
__lowerCAmelCase = do_lower_case
__lowerCAmelCase = strip_accents
__lowerCAmelCase = tokenize_chinese_chars
__lowerCAmelCase = normalizer_class(**snake_case_ )
__lowerCAmelCase = do_lower_case
def A__ ( self , snake_case_ , **snake_case_ ) -> Tuple:
__lowerCAmelCase = PaddingStrategy.MAX_LENGTH
__lowerCAmelCase = text
__lowerCAmelCase = kwargs.pop("""text_pair""" , snake_case_ )
__lowerCAmelCase = kwargs.pop("""return_tensors""" , snake_case_ )
__lowerCAmelCase = {
"""input_ids""": [],
"""attention_mask""": [],
"""token_type_ids""": [],
}
for idx, candidate_text in enumerate(snake_case_ ):
if batch_text_pair is not None:
__lowerCAmelCase = batch_text_pair[idx]
else:
__lowerCAmelCase = None
__lowerCAmelCase = super().__call__(snake_case_ , snake_case_ , return_tensors=snake_case_ , **snake_case_ )
__lowerCAmelCase = encoded_candidates.get("""input_ids""" )
__lowerCAmelCase = encoded_candidates.get("""attention_mask""" )
__lowerCAmelCase = encoded_candidates.get("""token_type_ids""" )
if encoded_input_ids is not None:
output_data["input_ids"].append(snake_case_ )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(snake_case_ )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(snake_case_ )
__lowerCAmelCase = {key: item for key, item in output_data.items() if len(snake_case_ ) != 0}
return BatchEncoding(snake_case_ , tensor_type=snake_case_ )
def A__ ( self , snake_case_ , snake_case_=None ) -> Optional[int]:
__lowerCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def A__ ( self , snake_case_ , snake_case_ = None ) -> List[int]:
__lowerCAmelCase = [self.sep_token_id]
__lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def A__ ( self , snake_case_ , snake_case_ = None ) -> Tuple[str]:
__lowerCAmelCase = self._tokenizer.model.save(snake_case_ , name=snake_case_ )
return tuple(snake_case_ )
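# Hedged usage note (illustrative, not asserted by this file): upstream, the candidate-batching
# method defined above is exposed as `batch_encode_candidates`, taking a list of candidate lists
# and requiring `max_length`/`return_tensors`. Checkpoint name and call shape are assumptions.
#
#   from transformers import RealmTokenizerFast
#   tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
#   batch = tokenizer.batch_encode_candidates(
#       [["Hello world!", "Nice to meet you!"]], max_length=10, return_tensors="pt"
#   )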
| 301
|
"""simple docstring"""
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE_ = get_tests_dir('''fixtures/spiece.model''')
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( A__ , unittest.TestCase ):
'''simple docstring'''
_snake_case = DebertaVaTokenizer
_snake_case = DebertaVaTokenizerFast
_snake_case = True
_snake_case = True
def A__ ( self ) -> Optional[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , unk_token="""<unk>""" )
tokenizer.save_pretrained(self.tmpdirname )
def A__ ( self , snake_case_ ) -> List[Any]:
__lowerCAmelCase = """this is a test"""
__lowerCAmelCase = """this is a test"""
return input_text, output_text
def A__ ( self ) -> Optional[Any]:
__lowerCAmelCase = """<pad>"""
__lowerCAmelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ )
def A__ ( self ) -> Any:
__lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """[PAD]""" )
self.assertEqual(len(snake_case_ ) , 30_001 )
def A__ ( self ) -> Optional[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 30_000 )
def A__ ( self ) -> int:
# fmt: off
__lowerCAmelCase = """ \tHeLLo!how \n Are yoU? """
__lowerCAmelCase = ["""▁hello""", """!""", """how""", """▁are""", """▁you""", """?"""]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , do_lower_case=snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = DebertaVaTokenizerFast(snake_case_ , do_lower_case=snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
@unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" )
def A__ ( self ) -> int:
pass
@unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" )
def A__ ( self ) -> Dict:
pass
def A__ ( self ) -> List[str]:
# fmt: off
__lowerCAmelCase = """I was born in 92000, and this is falsé."""
__lowerCAmelCase = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = DebertaVaTokenizerFast(snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
def A__ ( self ) -> Dict:
# fmt: off
__lowerCAmelCase = """I was born in 92000, and this is falsé."""
__lowerCAmelCase = ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = DebertaVaTokenizerFast(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
def A__ ( self ) -> Any:
# fmt: off
__lowerCAmelCase = """I was born in 92000, and this is falsé."""
__lowerCAmelCase = ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """.""", ]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = DebertaVaTokenizerFast(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
def A__ ( self ) -> Tuple:
# fmt: off
__lowerCAmelCase = """I was born in 92000, and this is falsé."""
__lowerCAmelCase = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = DebertaVaTokenizerFast(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
def A__ ( self ) -> Any:
# fmt: off
__lowerCAmelCase = """ \tHeLLo!how \n Are yoU? """
__lowerCAmelCase = ["""▁""", """<unk>""", """e""", """<unk>""", """o""", """!""", """how""", """▁""", """<unk>""", """re""", """▁yo""", """<unk>""", """?"""]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = DebertaVaTokenizerFast(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
def A__ ( self ) -> int:
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = self.get_rust_tokenizer()
__lowerCAmelCase = """I was born in 92000, and this is falsé."""
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
__lowerCAmelCase = rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = self.get_rust_tokenizer()
__lowerCAmelCase = tokenizer.encode(snake_case_ )
__lowerCAmelCase = rust_tokenizer.encode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
def A__ ( self ) -> str:
__lowerCAmelCase = """This is a test"""
__lowerCAmelCase = [13, 1, 4_398, 25, 21, 1_289]
__lowerCAmelCase = ["""▁""", """T""", """his""", """▁is""", """▁a""", """▁test"""]
__lowerCAmelCase = ["""▁""", """<unk>""", """his""", """▁is""", """▁a""", """▁test"""]
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , keep_accents=snake_case_ )
__lowerCAmelCase = DebertaVaTokenizerFast(snake_case_ , keep_accents=snake_case_ )
__lowerCAmelCase = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = rust_tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
# fmt: off
__lowerCAmelCase = """I was born in 92000, and this is falsé."""
__lowerCAmelCase = [13, 1, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9]
__lowerCAmelCase = ["""▁""", """I""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """.""", ]
__lowerCAmelCase = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """.""", ]
# fmt: on
__lowerCAmelCase = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = rust_tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
def A__ ( self ) -> Optional[int]:
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ )
__lowerCAmelCase = tokenizer.encode("""sequence builders""" )
__lowerCAmelCase = tokenizer.encode("""multi-sequence build""" )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(snake_case_ )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(snake_case_ , snake_case_ )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , snake_case_ )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , snake_case_ , )
@slow
def A__ ( self ) -> int:
# fmt: off
__lowerCAmelCase = {"""input_ids""": [[1, 39_867, 36, 19_390, 486, 27, 35_052, 81_436, 18, 60_685, 1_225, 7, 35_052, 81_436, 18, 9_367, 16_899, 18, 15_937, 53, 594, 773, 18, 16_287, 30_465, 36, 15_937, 6, 41_139, 38, 36_979, 60_763, 191, 6, 34_132, 99, 6, 50_538, 390, 43_230, 6, 34_132, 2_779, 20_850, 14, 699, 1_072, 1_194, 36, 382, 10_901, 53, 7, 699, 1_072, 2_084, 36, 20_422, 630, 53, 19, 105, 3_049, 1_896, 1_053, 16_899, 1_506, 11, 37_978, 4_243, 7, 1_237, 31_869, 200, 16_566, 654, 6, 35_052, 81_436, 7, 55_630, 13_593, 4, 2], [1, 26, 15_011, 13, 667, 8, 1_053, 18, 23_611, 1_237, 72_356, 12_820, 34, 104_134, 1_209, 35, 13_313, 6_627, 21, 202, 347, 7, 164, 2_399, 11, 46, 4_485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_232, 2_864, 15_785, 14_951, 105, 5, 8_581, 1_250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case_ , model_name="""microsoft/deberta-v2-xlarge""" , revision="""ad6e42c1532ddf3a15c39246b63f5559d558b670""" , )
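# Hedged standalone illustration of the round-trip the tests above exercise (assumes network
# access to the checkpoint named in the integration test; otherwise read it as pseudo-usage):
#
#   tok = DebertaVaTokenizer.from_pretrained("microsoft/deberta-v2-xlarge")
#   ids = tok.encode("I was born in 92000, and this is falsé.")
#   text = tok.decode(ids, skip_special_tokens=True)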
| 301
| 1
|
"""simple docstring"""
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]


def patience_sort(collection):
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)
    # use a heap-based merge to merge stacks efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection
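# Hedged self-check (not in the original file): a tiny sanity test for patience_sort above,
# with plain sorted() as the oracle.
def _patience_sort_smoke_test():
    sample = [1, 9, 5, 21, 17, 6]
    assert patience_sort(list(sample)) == sorted(sample)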
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = input('''Enter numbers separated by a comma:\n''').strip()
SCREAMING_SNAKE_CASE_ = [int(item) for item in user_input.split(''',''')]
print(patience_sort(unsorted))
| 301
|
"""simple docstring"""
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
SCREAMING_SNAKE_CASE_ = open # noqa: we just need to have a builtin inside this module to test it properly
| 301
| 1
|
"""simple docstring"""
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
SCREAMING_SNAKE_CASE_ = '''\
@inproceedings{kakwani2020indicnlpsuite,
title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
year={2020},
booktitle={Findings of EMNLP},
}
'''
SCREAMING_SNAKE_CASE_ = '''\
IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide
variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.
'''
SCREAMING_SNAKE_CASE_ = '''
Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset.
Args:
predictions: list of predictions to score (as int64),
except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).
references: list of ground truth labels corresponding to the predictions (as int64),
except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).
Returns: depending on the IndicGLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"precision": Precision@10
Examples:
>>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')
>>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'precision@10\': 1.0}
'''
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_fa(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def precision_at_aa(en_sentvecs, in_sentvecs):
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]
    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)
    sim = cdist(en_sentvecs, in_sentvecs, "cosine")
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
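# Hedged self-check (not part of the metric): with identical English and Indic sentence vectors
# each query's nearest neighbour is itself, so precision@10 is 1.0, matching the docstring example.
def _precision_at_10_smoke_test():
    vecs = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
    assert precision_at_aa(vecs, vecs) == 1.0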
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
'''simple docstring'''
def A__ ( self ) -> Optional[int]:
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", """
"""\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", """
"""\"wiki-ner\"]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int64""" )
if self.config_name != """cvit-mkb-clsr"""
else datasets.Sequence(datasets.Value("""float32""" ) ),
"""references""": datasets.Value("""int64""" )
if self.config_name != """cvit-mkb-clsr"""
else datasets.Sequence(datasets.Value("""float32""" ) ),
} ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" if self.config_name != """cvit-mkb-clsr""" else None , )
def A__ ( self , snake_case_ , snake_case_ ) -> Optional[int]:
if self.config_name == "cvit-mkb-clsr":
return {"precision@10": precision_at_aa(snake_case_ , snake_case_ )}
elif self.config_name in ["wiki-ner"]:
return acc_and_fa(snake_case_ , snake_case_ )
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(snake_case_ , snake_case_ )}
else:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", """
"""\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", """
"""\"wiki-ner\"]""" )
| 301
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
SCREAMING_SNAKE_CASE_ = {
'''vocab_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-openqa''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt'''
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-openqa''': (
'''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-reader''': (
'''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-openqa''': (
'''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-reader''': (
'''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'''
),
},
}
SCREAMING_SNAKE_CASE_ = {
'''google/realm-cc-news-pretrained-embedder''': 512,
'''google/realm-cc-news-pretrained-encoder''': 512,
'''google/realm-cc-news-pretrained-scorer''': 512,
'''google/realm-cc-news-pretrained-openqa''': 512,
'''google/realm-orqa-nq-openqa''': 512,
'''google/realm-orqa-nq-reader''': 512,
'''google/realm-orqa-wq-openqa''': 512,
'''google/realm-orqa-wq-reader''': 512,
}
SCREAMING_SNAKE_CASE_ = {
'''google/realm-cc-news-pretrained-embedder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-encoder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-scorer''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-reader''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-reader''': {'''do_lower_case''': True},
}
class lowerCAmelCase_ ( A__ ):
'''simple docstring'''
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_INIT_CONFIGURATION
_snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case = RealmTokenizer
def __init__( self , snake_case_=None , snake_case_=None , snake_case_=True , snake_case_="[UNK]" , snake_case_="[SEP]" , snake_case_="[PAD]" , snake_case_="[CLS]" , snake_case_="[MASK]" , snake_case_=True , snake_case_=None , **snake_case_ , ) -> Optional[int]:
super().__init__(
snake_case_ , tokenizer_file=snake_case_ , do_lower_case=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , pad_token=snake_case_ , cls_token=snake_case_ , mask_token=snake_case_ , tokenize_chinese_chars=snake_case_ , strip_accents=snake_case_ , **snake_case_ , )
__lowerCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , snake_case_ ) != do_lower_case
or normalizer_state.get("""strip_accents""" , snake_case_ ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , snake_case_ ) != tokenize_chinese_chars
):
__lowerCAmelCase = getattr(snake_case_ , normalizer_state.pop("""type""" ) )
__lowerCAmelCase = do_lower_case
__lowerCAmelCase = strip_accents
__lowerCAmelCase = tokenize_chinese_chars
__lowerCAmelCase = normalizer_class(**snake_case_ )
__lowerCAmelCase = do_lower_case
def A__ ( self , snake_case_ , **snake_case_ ) -> Tuple:
__lowerCAmelCase = PaddingStrategy.MAX_LENGTH
__lowerCAmelCase = text
__lowerCAmelCase = kwargs.pop("""text_pair""" , snake_case_ )
__lowerCAmelCase = kwargs.pop("""return_tensors""" , snake_case_ )
__lowerCAmelCase = {
"""input_ids""": [],
"""attention_mask""": [],
"""token_type_ids""": [],
}
for idx, candidate_text in enumerate(snake_case_ ):
if batch_text_pair is not None:
__lowerCAmelCase = batch_text_pair[idx]
else:
__lowerCAmelCase = None
__lowerCAmelCase = super().__call__(snake_case_ , snake_case_ , return_tensors=snake_case_ , **snake_case_ )
__lowerCAmelCase = encoded_candidates.get("""input_ids""" )
__lowerCAmelCase = encoded_candidates.get("""attention_mask""" )
__lowerCAmelCase = encoded_candidates.get("""token_type_ids""" )
if encoded_input_ids is not None:
output_data["input_ids"].append(snake_case_ )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(snake_case_ )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(snake_case_ )
__lowerCAmelCase = {key: item for key, item in output_data.items() if len(snake_case_ ) != 0}
return BatchEncoding(snake_case_ , tensor_type=snake_case_ )
def A__ ( self , snake_case_ , snake_case_=None ) -> Optional[int]:
__lowerCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def A__ ( self , snake_case_ , snake_case_ = None ) -> List[int]:
__lowerCAmelCase = [self.sep_token_id]
__lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def A__ ( self , snake_case_ , snake_case_ = None ) -> Tuple[str]:
__lowerCAmelCase = self._tokenizer.model.save(snake_case_ , name=snake_case_ )
return tuple(snake_case_ )
| 301
| 1
|
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase_ ( A__ , unittest.TestCase ):
'''simple docstring'''
_snake_case = LEDTokenizer
_snake_case = LEDTokenizerFast
_snake_case = True
def A__ ( self ) -> Tuple:
super().setUp()
__lowerCAmelCase = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
__lowerCAmelCase = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )
__lowerCAmelCase = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
__lowerCAmelCase = {"""unk_token""": """<unk>"""}
__lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(snake_case_ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(snake_case_ ) )
def A__ ( self , **snake_case_ ) -> List[Any]:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **snake_case_ )
def A__ ( self , **snake_case_ ) -> List[Any]:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **snake_case_ )
def A__ ( self , snake_case_ ) -> str:
return "lower newer", "lower newer"
@cached_property
def A__ ( self ) -> List[Any]:
return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" )
@cached_property
def A__ ( self ) -> Union[str, Any]:
return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" )
@require_torch
def A__ ( self ) -> Dict:
__lowerCAmelCase = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
__lowerCAmelCase = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__lowerCAmelCase = tokenizer(snake_case_ , max_length=len(snake_case_ ) , padding=snake_case_ , return_tensors="""pt""" )
self.assertIsInstance(snake_case_ , snake_case_ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
__lowerCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(snake_case_ , snake_case_ )
@require_torch
def A__ ( self ) -> Optional[int]:
__lowerCAmelCase = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__lowerCAmelCase = tokenizer(snake_case_ , padding=snake_case_ , return_tensors="""pt""" )
self.assertIn("""input_ids""" , snake_case_ )
self.assertIn("""attention_mask""" , snake_case_ )
self.assertNotIn("""labels""" , snake_case_ )
self.assertNotIn("""decoder_attention_mask""" , snake_case_ )
@require_torch
def A__ ( self ) -> Tuple:
__lowerCAmelCase = [
"""Summary of the text.""",
"""Another summary.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__lowerCAmelCase = tokenizer(text_target=snake_case_ , max_length=32 , padding="""max_length""" , return_tensors="""pt""" )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
@require_torch
def A__ ( self ) -> int:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__lowerCAmelCase = tokenizer(
["""I am a small frog""" * 1_024, """I am a small frog"""] , padding=snake_case_ , truncation=snake_case_ , return_tensors="""pt""" )
self.assertIsInstance(snake_case_ , snake_case_ )
self.assertEqual(batch.input_ids.shape , (2, 5_122) )
@require_torch
def A__ ( self ) -> Dict:
__lowerCAmelCase = ["""A long paragraph for summarization."""]
__lowerCAmelCase = [
"""Summary of the text.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__lowerCAmelCase = tokenizer(snake_case_ , return_tensors="""pt""" )
__lowerCAmelCase = tokenizer(text_target=snake_case_ , return_tensors="""pt""" )
__lowerCAmelCase = inputs["""input_ids"""]
__lowerCAmelCase = targets["""input_ids"""]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def A__ ( self ) -> Tuple:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__lowerCAmelCase = ["""Summary of the text.""", """Another summary."""]
__lowerCAmelCase = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
__lowerCAmelCase = tokenizer(snake_case_ , padding=snake_case_ )
__lowerCAmelCase = [[0] * len(snake_case_ ) for x in encoded_output["""input_ids"""]]
__lowerCAmelCase = tokenizer.pad(snake_case_ )
self.assertSequenceEqual(outputs["""global_attention_mask"""] , snake_case_ )
def A__ ( self ) -> Optional[Any]:
pass
def A__ ( self ) -> List[str]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(snake_case_ , **snake_case_ )
__lowerCAmelCase = self.tokenizer_class.from_pretrained(snake_case_ , **snake_case_ )
__lowerCAmelCase = """A, <mask> AllenNLP sentence."""
__lowerCAmelCase = tokenizer_r.encode_plus(snake_case_ , add_special_tokens=snake_case_ , return_token_type_ids=snake_case_ )
__lowerCAmelCase = tokenizer_p.encode_plus(snake_case_ , add_special_tokens=snake_case_ , return_token_type_ids=snake_case_ )
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
__lowerCAmelCase = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
__lowerCAmelCase = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
snake_case_ , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
snake_case_ , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
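# Hedged standalone illustration of the global-attention padding behaviour exercised above
# (assumes the allenai/led-base-16384 checkpoint is reachable; otherwise read as pseudo-usage):
#
#   tok = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
#   enc = tok(["Summary of the text.", "Another summary."], padding=True)
#   enc["global_attention_mask"] = [[0] * len(ids) for ids in enc["input_ids"]]
#   padded = tok.pad(enc)  # the shorter row's global_attention_mask is padded with -1, as asserted above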
| 301
|
"""simple docstring"""
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    # Returns the smallest spiral side length for which the prime ratio along the diagonals
    # drops below `ratio`.
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
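# Hedged self-check (not in the original file): a few hand-verifiable cases for the 6k±1
# primality test above.
def _is_prime_smoke_test():
    assert is_prime(2) and is_prime(3) and is_prime(29)
    assert not is_prime(1) and not is_prime(27)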
if __name__ == "__main__":
import doctest
doctest.testmod()
| 301
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_falcon"] = [
        "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FalconForCausalLM",
        "FalconModel",
        "FalconPreTrainedModel",
        "FalconForSequenceClassification",
        "FalconForTokenClassification",
        "FalconForQuestionAnswering",
    ]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
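# Hedged illustration of the lazy-import pattern used above. This is a deliberately simplified
# stand-in, not the real transformers._LazyModule: attribute access looks up which submodule
# exports the requested name and imports that submodule on demand.
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")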
| 301
|
"""simple docstring"""
import os
from distutils.util import strtobool
def get_int_from_env(env_keys, default):
    # Return the first non-negative integer value found in `env_keys`, else the default.
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
    return value
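# Hedged self-check for the helpers above (the environment variable names are made up for illustration).
def _env_helpers_smoke_test():
    os.environ["_DUMMY_FLAG"] = "true"
    assert parse_flag_from_env("_DUMMY_FLAG")
    assert get_int_from_env(["_MISSING_A", "_MISSING_B"], 7) == 7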
| 301
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class lowerCAmelCase_ ( A__ ):
'''simple docstring'''
_snake_case = '''mobilenet_v1'''
def __init__( self , snake_case_=3 , snake_case_=224 , snake_case_=1.0 , snake_case_=8 , snake_case_="relu6" , snake_case_=True , snake_case_=0.999 , snake_case_=0.02 , snake_case_=0.001 , **snake_case_ , ) -> Union[str, Any]:
super().__init__(**snake_case_ )
if depth_multiplier <= 0:
raise ValueError("""depth_multiplier must be greater than zero.""" )
__lowerCAmelCase = num_channels
__lowerCAmelCase = image_size
__lowerCAmelCase = depth_multiplier
__lowerCAmelCase = min_depth
__lowerCAmelCase = hidden_act
__lowerCAmelCase = tf_padding
__lowerCAmelCase = classifier_dropout_prob
__lowerCAmelCase = initializer_range
__lowerCAmelCase = layer_norm_eps
class lowerCAmelCase_ ( A__ ):
'''simple docstring'''
_snake_case = version.parse('''1.11''' )
@property
def A__ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict([("""pixel_values""", {0: """batch"""})] )
@property
def A__ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "image-classification":
return OrderedDict([("""logits""", {0: """batch"""})] )
else:
return OrderedDict([("""last_hidden_state""", {0: """batch"""}), ("""pooler_output""", {0: """batch"""})] )
@property
def A__ ( self ) -> float:
return 1e-4
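# Hedged usage sketch (assumes the two classes above are exposed as MobileNetV1Config and
# MobileNetV1OnnxConfig in transformers, as the checkpoint map suggests; illustrative only):
#
#   from transformers import MobileNetV1Config
#   config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)
#   assert config.depth_multiplier == 0.75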
| 301
|
"""simple docstring"""
def solution(limit: int = 100_0000) -> int:
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])
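# Hedged self-check (not in the original file): for limit = 8 the count is
# phi(2)+...+phi(8) = 1+2+2+4+2+6+4 = 21, the small example quoted by Project Euler 72.
def _totient_smoke_test():
    assert solution(8) == 21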
if __name__ == "__main__":
print(solution())
| 301
| 1
|
"""simple docstring"""
import random
def _partition(data, pivot):
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items, index):
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)

    # invalid input
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
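# Hedged self-check (not in the original file): quick_select(items, k) should agree with
# sorted(items)[k] for every valid k.
def _quick_select_smoke_test():
    data = [7, 1, 5, 3, 9]
    assert all(quick_select(data, k) == sorted(data)[k] for k in range(len(data)))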
| 301
|
"""simple docstring"""
from math import isqrt, log2


def calculate_prime_numbers(max_number):
    # Sieve of Eratosthenes: all primes strictly below `max_number`.
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 80_0800, degree: int = 80_0800) -> int:
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)
    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1
    return hybrid_integers_count
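# Hedged self-check (not in the original file): the sieve above should return the primes
# strictly below its argument.
def _prime_sieve_smoke_test():
    assert calculate_prime_numbers(10) == [2, 3, 5, 7]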
if __name__ == "__main__":
print(F"{solution() = }")
| 301
| 1
|
"""simple docstring"""
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
logging.set_verbosity_info()
def lowercase (_lowerCAmelCase , _lowerCAmelCase ):
if "xprophetnet" in prophetnet_checkpoint_path:
__lowerCAmelCase = XLMProphetNetForConditionalGenerationOld.from_pretrained(_lowerCAmelCase )
__lowerCAmelCase , __lowerCAmelCase = XLMProphetNetForConditionalGeneration.from_pretrained(
_lowerCAmelCase , output_loading_info=_lowerCAmelCase )
else:
__lowerCAmelCase = ProphetNetForConditionalGenerationOld.from_pretrained(_lowerCAmelCase )
__lowerCAmelCase , __lowerCAmelCase = ProphetNetForConditionalGeneration.from_pretrained(
_lowerCAmelCase , output_loading_info=_lowerCAmelCase )
__lowerCAmelCase = ["""key_proj""", """value_proj""", """query_proj"""]
__lowerCAmelCase = {
"""self_attn""": """ngram_self_attn""",
"""cross_attn""": """encoder_attn""",
"""cross_attn_layer_norm""": """encoder_attn_layer_norm""",
"""feed_forward_layer_norm""": """final_layer_norm""",
"""feed_forward""": """""",
"""intermediate""": """fc1""",
"""output""": """fc2""",
"""key_proj""": """k_proj""",
"""query_proj""": """q_proj""",
"""value_proj""": """v_proj""",
"""word_embeddings""": """embed_tokens""",
"""embeddings_layer_norm""": """emb_layer_norm""",
"""relative_pos_embeddings""": """relative_linear""",
"""ngram_embeddings""": """ngram_input_embed""",
"""position_embeddings""": """embed_positions""",
}
for key in loading_info["missing_keys"]:
__lowerCAmelCase = key.split(""".""" )
if attributes[0] == "lm_head":
__lowerCAmelCase = prophet
__lowerCAmelCase = prophet_old
else:
__lowerCAmelCase = prophet.prophetnet
__lowerCAmelCase = prophet_old.model
__lowerCAmelCase = False
for attribute in attributes:
if attribute in mapping:
__lowerCAmelCase = mapping[attribute]
if not hasattr(_lowerCAmelCase , _lowerCAmelCase ) and len(_lowerCAmelCase ) > 0:
__lowerCAmelCase = attribute
elif hasattr(_lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
__lowerCAmelCase = old_model.weight
logger.info(f"""{attribute} is initialized.""" )
__lowerCAmelCase = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
__lowerCAmelCase = old_model.bias
logger.info(f"""{attribute} is initialized""" )
__lowerCAmelCase = True
break
elif attribute in special_keys and hasattr(_lowerCAmelCase , """in_proj_weight""" ):
__lowerCAmelCase = old_model.in_proj_weight.shape[0] // 3
__lowerCAmelCase = getattr(_lowerCAmelCase , _lowerCAmelCase )
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
__lowerCAmelCase = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
__lowerCAmelCase = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
__lowerCAmelCase = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
__lowerCAmelCase = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
__lowerCAmelCase = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
__lowerCAmelCase = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
__lowerCAmelCase = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
__lowerCAmelCase = nn.Parameter(old_model.embed_positions.weight[:512, :] )
__lowerCAmelCase = True
break
if attribute.isdigit():
__lowerCAmelCase = model[int(_lowerCAmelCase )]
__lowerCAmelCase = old_model[int(_lowerCAmelCase )]
else:
__lowerCAmelCase = getattr(_lowerCAmelCase , _lowerCAmelCase )
if old_attribute == "":
__lowerCAmelCase = old_model
else:
if not hasattr(_lowerCAmelCase , _lowerCAmelCase ):
raise ValueError(f"""{old_model} does not have {old_attribute}""" )
__lowerCAmelCase = getattr(_lowerCAmelCase , _lowerCAmelCase )
if not is_key_init:
raise ValueError(f"""{key} was not correctly initialized!""" )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
prophet.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--prophetnet_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 301
|
"""simple docstring"""
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
SCREAMING_SNAKE_CASE_ = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_=16 , snake_case_=13 , snake_case_=7 , snake_case_=14 , snake_case_=10 , snake_case_=19 , snake_case_=5 , snake_case_=4 , snake_case_=True , snake_case_=16 , snake_case_=2 , snake_case_=4 , snake_case_=4 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=[1, 2, 3, 4, 5] , snake_case_=25 , snake_case_=5 , ) -> Tuple:
__lowerCAmelCase = d_model
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = prediction_length
__lowerCAmelCase = context_length
__lowerCAmelCase = cardinality
__lowerCAmelCase = num_time_features
__lowerCAmelCase = lags_sequence
__lowerCAmelCase = embedding_dimension
__lowerCAmelCase = is_training
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = context_length
__lowerCAmelCase = prediction_length + label_length
__lowerCAmelCase = label_length
__lowerCAmelCase = moving_average
__lowerCAmelCase = autocorrelation_factor
def A__ ( self ) -> List[Any]:
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def A__ ( self , snake_case_ ) -> Any:
__lowerCAmelCase = config.context_length + max(config.lags_sequence )
__lowerCAmelCase = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
__lowerCAmelCase = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
__lowerCAmelCase = floats_tensor([self.batch_size, _past_length] )
__lowerCAmelCase = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
__lowerCAmelCase = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
__lowerCAmelCase = floats_tensor([self.batch_size, config.prediction_length] )
__lowerCAmelCase = {
"""past_values""": past_values,
"""static_categorical_features""": static_categorical_features,
"""past_time_features""": past_time_features,
"""past_observed_mask""": past_observed_mask,
"""future_time_features""": future_time_features,
"""future_values""": future_values,
}
return inputs_dict
def A__ ( self ) -> Union[str, Any]:
__lowerCAmelCase = self.get_config()
__lowerCAmelCase = self.prepare_autoformer_inputs_dict(snake_case_ )
return config, inputs_dict
def A__ ( self ) -> int:
__lowerCAmelCase , __lowerCAmelCase = self.prepare_config_and_inputs()
return config, inputs_dict
def A__ ( self , snake_case_ , snake_case_ ) -> int:
__lowerCAmelCase = AutoformerModel(config=snake_case_ ).to(snake_case_ ).eval()
__lowerCAmelCase = model(**snake_case_ )
__lowerCAmelCase = outputs.encoder_last_hidden_state
__lowerCAmelCase = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCAmelCase = model.get_encoder()
encoder.save_pretrained(snake_case_ )
__lowerCAmelCase = AutoformerEncoder.from_pretrained(snake_case_ ).to(snake_case_ )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = model.create_network_inputs(**snake_case_ )
__lowerCAmelCase , __lowerCAmelCase = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
__lowerCAmelCase = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
__lowerCAmelCase = encoder(inputs_embeds=snake_case_ )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
__lowerCAmelCase = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
__lowerCAmelCase = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
__lowerCAmelCase = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
__lowerCAmelCase = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCAmelCase = model.get_decoder()
decoder.save_pretrained(snake_case_ )
__lowerCAmelCase = AutoformerDecoder.from_pretrained(snake_case_ ).to(snake_case_ )
__lowerCAmelCase = decoder(
trend=snake_case_ , inputs_embeds=snake_case_ , encoder_hidden_states=snake_case_ , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class lowerCAmelCase_ ( A__ , A__ , unittest.TestCase ):
'''simple docstring'''
_snake_case = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
_snake_case = (AutoformerForPrediction,) if is_torch_available() else ()
_snake_case = {'''feature-extraction''': AutoformerModel} if is_torch_available() else {}
_snake_case = False
_snake_case = False
_snake_case = False
_snake_case = False
_snake_case = False
_snake_case = False
def A__ ( self ) -> Optional[int]:
__lowerCAmelCase = AutoformerModelTester(self )
__lowerCAmelCase = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ )
def A__ ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
def A__ ( self ) -> Dict:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(snake_case_ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case_ )
__lowerCAmelCase , __lowerCAmelCase = model_class.from_pretrained(snake_case_ , output_loading_info=snake_case_ )
self.assertEqual(info["""missing_keys"""] , [] )
def A__ ( self ) -> Optional[Any]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*snake_case_ )
@unittest.skip(reason="""Model has no tokens embeddings""" )
def A__ ( self ) -> Any:
pass
def A__ ( self ) -> str:
__lowerCAmelCase = inspect.signature(getattr(snake_case_ , """forward""" ) )
# The main input is the name of the argument after `self`
__lowerCAmelCase = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , snake_case_ )
def A__ ( self ) -> Any:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(snake_case_ )
__lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase = [*signature.parameters.keys()]
__lowerCAmelCase = [
"""past_values""",
"""past_time_features""",
"""past_observed_mask""",
"""static_categorical_features""",
"""static_real_features""",
"""future_values""",
"""future_time_features""",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("""future_observed_mask""" )
expected_arg_names.extend(
[
"""decoder_attention_mask""",
"""head_mask""",
"""decoder_head_mask""",
"""cross_attn_head_mask""",
"""encoder_outputs""",
"""past_key_values""",
"""output_hidden_states""",
"""output_attentions""",
"""use_cache""",
"""return_dict""",
] )
self.assertListEqual(arg_names[: len(snake_case_ )] , snake_case_ )
def A__ ( self ) -> Optional[int]:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = True
__lowerCAmelCase = getattr(self.model_tester , """seq_length""" , snake_case_ )
__lowerCAmelCase = getattr(self.model_tester , """decoder_seq_length""" , snake_case_ )
__lowerCAmelCase = getattr(self.model_tester , """encoder_seq_length""" , snake_case_ )
__lowerCAmelCase = getattr(self.model_tester , """d_model""" , snake_case_ )
__lowerCAmelCase = getattr(self.model_tester , """num_attention_heads""" , snake_case_ )
__lowerCAmelCase = d_model // num_attention_heads
for model_class in self.all_model_classes:
__lowerCAmelCase = True
__lowerCAmelCase = False
__lowerCAmelCase = True
__lowerCAmelCase = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
__lowerCAmelCase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__lowerCAmelCase = True
__lowerCAmelCase = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
__lowerCAmelCase = outputs.encoder_attentions
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
__lowerCAmelCase = len(snake_case_ )
__lowerCAmelCase = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(snake_case_ , snake_case_ )
# decoder attentions
__lowerCAmelCase = outputs.decoder_attentions
self.assertIsInstance(snake_case_ , (list, tuple) )
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
__lowerCAmelCase = outputs.cross_attentions
self.assertIsInstance(snake_case_ , (list, tuple) )
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
__lowerCAmelCase = True
__lowerCAmelCase = True
__lowerCAmelCase = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(out_len + 2 , len(snake_case_ ) )
__lowerCAmelCase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def A__ ( self ) -> int:
super().test_retain_grad_hidden_states_attentions()
def lowercase (_lowerCAmelCase="train-batch.pt" ):
__lowerCAmelCase = hf_hub_download(repo_id="""hf-internal-testing/tourism-monthly-batch""" , filename=_lowerCAmelCase , repo_type="""dataset""" )
__lowerCAmelCase = torch.load(_lowerCAmelCase , map_location=_lowerCAmelCase )
return batch
@require_torch
@slow
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self ) -> int:
__lowerCAmelCase = AutoformerModel.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(snake_case_ )
__lowerCAmelCase = prepare_batch()
with torch.no_grad():
__lowerCAmelCase = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , future_values=batch["""future_values"""] , future_time_features=batch["""future_time_features"""] , )[0]
__lowerCAmelCase = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , snake_case_ )
__lowerCAmelCase = torch.tensor(
[[0.3_593, -1.3_398, 0.6_330], [0.2_279, 1.5_396, -0.1_792], [0.0_450, 1.3_225, -0.2_335]] , device=snake_case_ )
self.assertTrue(torch.allclose(output[0, :3, :3] , snake_case_ , atol=snake_case_ ) )
def A__ ( self ) -> List[str]:
__lowerCAmelCase = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(snake_case_ )
__lowerCAmelCase = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
__lowerCAmelCase = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , ).encoder_last_hidden_state
__lowerCAmelCase = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , snake_case_ )
__lowerCAmelCase = torch.tensor(
[[-0.0_734, -0.9_036, 0.8_358], [4.7_186, 2.4_113, 1.9_581], [1.7_953, 2.3_558, 1.2_970]] , device=snake_case_ )
self.assertTrue(torch.allclose(output[0, :3, :3] , snake_case_ , atol=snake_case_ ) )
def A__ ( self ) -> Any:
__lowerCAmelCase = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(snake_case_ )
__lowerCAmelCase = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
__lowerCAmelCase = model.generate(
static_categorical_features=batch["""static_categorical_features"""] , past_time_features=batch["""past_time_features"""] , past_values=batch["""past_values"""] , future_time_features=batch["""future_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , )
__lowerCAmelCase = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , snake_case_ )
__lowerCAmelCase = torch.tensor([3_130.6_763, 4_056.5_293, 7_053.0_786] , device=snake_case_ )
__lowerCAmelCase = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , snake_case_ , rtol=1e-1 ) )
| 301
| 1
|
"""simple docstring"""
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
SCREAMING_SNAKE_CASE_ = '''\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
'''
SCREAMING_SNAKE_CASE_ = '''\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metrics is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
'''
SCREAMING_SNAKE_CASE_ = '''
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer
pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: "c" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric(\'mauve\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
'''simple docstring'''
def A__ ( self ) -> Optional[int]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/krishnap25/mauve""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/krishnap25/mauve"""] , reference_urls=[
"""https://arxiv.org/abs/2102.01454""",
"""https://github.com/krishnap25/mauve""",
] , )
def A__ ( self , snake_case_ , snake_case_ , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_="auto" , snake_case_=-1 , snake_case_=0.9 , snake_case_=5 , snake_case_=500 , snake_case_="gpt2-large" , snake_case_=-1 , snake_case_=1_024 , snake_case_=25 , snake_case_=5 , snake_case_=True , snake_case_=25 , ) -> str:
__lowerCAmelCase = compute_mauve(
p_text=snake_case_ , q_text=snake_case_ , p_features=snake_case_ , q_features=snake_case_ , p_tokens=snake_case_ , q_tokens=snake_case_ , num_buckets=snake_case_ , pca_max_data=snake_case_ , kmeans_explained_var=snake_case_ , kmeans_num_redo=snake_case_ , kmeans_max_iter=snake_case_ , featurize_model_name=snake_case_ , device_id=snake_case_ , max_text_length=snake_case_ , divergence_curve_discretization_size=snake_case_ , mauve_scaling_factor=snake_case_ , verbose=snake_case_ , seed=snake_case_ , )
return out
| 301
|
"""simple docstring"""
from math import pi, sqrt
def gamma(num: float) -> float:
    """Compute the Gamma function for positive integer and half-integer arguments."""
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)


def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0
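# Worked example of the half-integer recursion (illustrative note, not part of the original file):
# gamma(2.5) = 1.5 * gamma(1.5) = 1.5 * 0.5 * gamma(0.5) = 0.75 * sqrt(pi), which is about 1.3293.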
if __name__ == "__main__":
from doctest import testmod
testmod()
    num = 1.0
while num:
        num = float(input('''Gamma of: '''))
print(F"gamma({num}) = {gamma(num)}")
print('''\nEnter 0 to exit...''')
| 301
| 1
|
"""simple docstring"""
def solution(n: int = 10) -> str:
    """Return the last ``n`` digits of the number 28433 * 2**7830457 + 1."""
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    number = 28433 * (pow(2, 7830457, modulus)) + 1
    return str(number % modulus)
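# Note: the three-argument form ``pow(2, 7830457, modulus)`` performs modular exponentiation,
# so only the last ``n`` digits are ever held in memory instead of the full power of two,
# which has well over two million decimal digits.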
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"{solution(10) = }")
| 301
|
"""simple docstring"""
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
def lowercase ():
# Get the sagemaker specific mp parameters from smp_options variable.
__lowerCAmelCase = os.getenv("""SM_HP_MP_PARAMETERS""" , """{}""" )
try:
# Parse it and check the field "partitions" is included, it is required for model parallel.
__lowerCAmelCase = json.loads(_lowerCAmelCase )
if "partitions" not in smp_options:
return False
except json.JSONDecodeError:
return False
# Get the sagemaker specific framework parameters from mpi_options variable.
__lowerCAmelCase = os.getenv("""SM_FRAMEWORK_PARAMS""" , """{}""" )
try:
# Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
__lowerCAmelCase = json.loads(_lowerCAmelCase )
if not mpi_options.get("""sagemaker_mpi_enabled""" , _lowerCAmelCase ):
return False
except json.JSONDecodeError:
return False
# Lastly, check if the `smdistributed` module is present.
return importlib.util.find_spec("""smdistributed""" ) is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class lowerCAmelCase_ ( A__ ):
'''simple docstring'''
_snake_case = field(
default='''''' , metadata={'''help''': '''Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer'''} , )
def A__ ( self ) -> Tuple:
super().__post_init__()
warnings.warn(
"""`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use """
"""`TrainingArguments` instead.""" , snake_case_ , )
@cached_property
def A__ ( self ) -> "torch.device":
logger.info("""PyTorch: setting up devices""" )
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
"""torch.distributed process group is initialized, but local_rank == -1. """
"""In order to use Torch DDP, launch your script with `python -m torch.distributed.launch""" )
if self.no_cuda:
__lowerCAmelCase = torch.device("""cpu""" )
__lowerCAmelCase = 0
elif is_sagemaker_model_parallel_available():
__lowerCAmelCase = smp.local_rank()
__lowerCAmelCase = torch.device("""cuda""" , snake_case_ )
__lowerCAmelCase = 1
elif is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
torch.distributed.init_process_group(backend="""smddp""" , timeout=self.ddp_timeout_delta )
__lowerCAmelCase = int(os.getenv("""SMDATAPARALLEL_LOCAL_RANK""" ) )
__lowerCAmelCase = torch.device("""cuda""" , self.local_rank )
__lowerCAmelCase = 1
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
__lowerCAmelCase = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
# Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
# the default value.
__lowerCAmelCase = torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend="""nccl""" , timeout=self.ddp_timeout_delta )
__lowerCAmelCase = torch.device("""cuda""" , self.local_rank )
__lowerCAmelCase = 1
if device.type == "cuda":
torch.cuda.set_device(snake_case_ )
return device
@property
def A__ ( self ) -> Dict:
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
def A__ ( self ) -> Optional[int]:
return not is_sagemaker_model_parallel_available()
@property
def A__ ( self ) -> Tuple:
return False
| 301
| 1
|
"""simple docstring"""
def and_gate(input_1: int, input_2: int) -> int:
    """Return 1 only when both inputs are 1, mimicking a logical AND gate."""
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 301
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE_ = {
'''configuration_roberta''': ['''ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RobertaConfig''', '''RobertaOnnxConfig'''],
'''tokenization_roberta''': ['''RobertaTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = ['''RobertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
'''ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RobertaForCausalLM''',
'''RobertaForMaskedLM''',
'''RobertaForMultipleChoice''',
'''RobertaForQuestionAnswering''',
'''RobertaForSequenceClassification''',
'''RobertaForTokenClassification''',
'''RobertaModel''',
'''RobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
'''TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRobertaForCausalLM''',
'''TFRobertaForMaskedLM''',
'''TFRobertaForMultipleChoice''',
'''TFRobertaForQuestionAnswering''',
'''TFRobertaForSequenceClassification''',
'''TFRobertaForTokenClassification''',
'''TFRobertaMainLayer''',
'''TFRobertaModel''',
'''TFRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
'''FlaxRobertaForCausalLM''',
'''FlaxRobertaForMaskedLM''',
'''FlaxRobertaForMultipleChoice''',
'''FlaxRobertaForQuestionAnswering''',
'''FlaxRobertaForSequenceClassification''',
'''FlaxRobertaForTokenClassification''',
'''FlaxRobertaModel''',
'''FlaxRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 301
| 1
|
"""simple docstring"""
def solution(n: int = 1000000) -> int:
    """Return the starting number below ``n`` that produces the longest Collatz chain."""
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for input1 in range(2, n):
        counter = 0
        number = input1
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if input1 not in counters:
            counters[input1] = counter
        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter
    return largest_number
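# Illustrative check (not part of the original file): solution(10) returns 9, whose Collatz
# chain 9 -> 28 -> 14 -> 7 -> ... -> 1 has 20 terms; for the default bound of one million the
# result is 837799, the well-known Project Euler 14 answer.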
if __name__ == "__main__":
print(solution(int(input().strip())))
| 301
|
"""simple docstring"""
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
SCREAMING_SNAKE_CASE_ = pytest.mark.integration
@pytest.mark.parametrize("""path""" , ["""paws""", """csv"""] )
def lowercase (_lowerCAmelCase , _lowerCAmelCase ):
inspect_dataset(_lowerCAmelCase , _lowerCAmelCase )
__lowerCAmelCase = path + """.py"""
assert script_name in os.listdir(_lowerCAmelCase )
assert "__pycache__" not in os.listdir(_lowerCAmelCase )
@pytest.mark.filterwarnings("""ignore:inspect_metric is deprecated:FutureWarning""" )
@pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" )
@pytest.mark.parametrize("""path""" , ["""accuracy"""] )
def lowercase (_lowerCAmelCase , _lowerCAmelCase ):
inspect_metric(_lowerCAmelCase , _lowerCAmelCase )
__lowerCAmelCase = path + """.py"""
assert script_name in os.listdir(_lowerCAmelCase )
assert "__pycache__" not in os.listdir(_lowerCAmelCase )
@pytest.mark.parametrize(
"""path, config_name, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = get_dataset_config_info(_lowerCAmelCase , config_name=_lowerCAmelCase )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
with pytest.raises(_lowerCAmelCase ):
get_dataset_config_info(_lowerCAmelCase , config_name=_lowerCAmelCase )
@pytest.mark.parametrize(
"""path, expected""" , [
("""squad""", """plain_text"""),
("""acronym_identification""", """default"""),
("""lhoestq/squad""", """plain_text"""),
("""lhoestq/test""", """default"""),
("""lhoestq/demo1""", """lhoestq--demo1"""),
("""dalle-mini/wit""", """dalle-mini--wit"""),
] , )
def lowercase (_lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = get_dataset_config_names(_lowerCAmelCase )
assert expected in config_names
@pytest.mark.parametrize(
"""path, expected_configs, expected_splits_in_first_config""" , [
("""squad""", ["""plain_text"""], ["""train""", """validation"""]),
("""dalle-mini/wit""", ["""dalle-mini--wit"""], ["""train"""]),
("""paws""", ["""labeled_final""", """labeled_swap""", """unlabeled_final"""], ["""train""", """test""", """validation"""]),
] , )
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = get_dataset_infos(_lowerCAmelCase )
assert list(infos.keys() ) == expected_configs
__lowerCAmelCase = expected_configs[0]
assert expected_config in infos
__lowerCAmelCase = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
"""path, expected_config, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = get_dataset_infos(_lowerCAmelCase )
assert expected_config in infos
__lowerCAmelCase = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
with pytest.raises(_lowerCAmelCase ):
get_dataset_split_names(_lowerCAmelCase , config_name=_lowerCAmelCase )
| 301
| 1
|
"""simple docstring"""
from __future__ import annotations
class IIRFilter:
    """N-order IIR filter in direct form I (difference equation with a and b coefficients)."""

    def __init__(self, order: int) -> None:
        self.order = order
        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs) != self.order + 1:
            error_msg = (
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )
            raise ValueError(error_msg)
        if len(b_coeffs) != self.order + 1:
            error_msg = (
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )
            raise ValueError(error_msg)
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
        return result
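# Minimal usage sketch added for illustration; the coefficients below are arbitrary
# placeholders (a stable first-order low-pass), not a tuned filter design.
if __name__ == "__main__":
    filt = IIRFilter(1)
    filt.set_coefficients([1.0, -0.9], [0.05, 0.05])
    # Step response: successive outputs creep towards the DC gain of 1.0.
    print([round(filt.process(1.0), 4) for _ in range(5)])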
| 301
|
"""simple docstring"""
def solution(n: int = 1000000) -> int:
    """Return the starting number below ``n`` that produces the longest Collatz chain."""
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for input1 in range(2, n):
        counter = 0
        number = input1
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if input1 not in counters:
            counters[input1] = counter
        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter
    return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
| 301
| 1
|
"""simple docstring"""
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    """Solve a*x**2 + b*x + c = 0 and return both (possibly complex) roots."""
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c
    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main():
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution_1} and {solution_2}")
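# For the example above: delta = 6 * 6 - 4 * 5 * 1 = 16, so the roots of 5x^2 + 6x + 1 are
# (-6 + 4) / 10 = -0.2 and (-6 - 4) / 10 = -1.0.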
if __name__ == "__main__":
main()
| 301
|
"""simple docstring"""
import sys
import turtle
def get_mid(p1, p2):
    """Return the midpoint of the segment joining two points."""
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2


def triangle(vertex1, vertex2, vertex3, depth) -> None:
    """Draw the outline of a triangle, then recurse into its three corner sub-triangles."""
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])
    if depth == 0:
        return
    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)
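# Each recursion level splits a triangle into three half-size corner copies, so a run with
# depth d draws 3**d smallest triangles in addition to the outlines of every coarser level.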
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'''Correct format for using this script: '''
'''python fractals.py <int:depth_for_fractal>'''
)
    my_pen = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor('''red''')
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 301
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE_ = {
'''configuration_speecht5''': [
'''SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP''',
'''SpeechT5Config''',
'''SpeechT5HifiGanConfig''',
],
'''feature_extraction_speecht5''': ['''SpeechT5FeatureExtractor'''],
'''processing_speecht5''': ['''SpeechT5Processor'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = ['''SpeechT5Tokenizer''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
'''SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SpeechT5ForSpeechToText''',
'''SpeechT5ForSpeechToSpeech''',
'''SpeechT5ForTextToSpeech''',
'''SpeechT5Model''',
'''SpeechT5PreTrainedModel''',
'''SpeechT5HifiGan''',
]
if TYPE_CHECKING:
from .configuration_speechta import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechTaConfig,
SpeechTaHifiGanConfig,
)
from .feature_extraction_speechta import SpeechTaFeatureExtractor
from .processing_speechta import SpeechTaProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speechta import SpeechTaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speechta import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaHifiGan,
SpeechTaModel,
SpeechTaPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 301
|
"""simple docstring"""
def partition(m: int) -> int:
    """Count the integer partitions of ``m`` via a bottom-up dynamic-programming table."""
    memo = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1
    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]
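# Illustrative check (not part of the original file): partition(5) returns 7, the number of
# integer partitions of 5 (5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1, 1+1+1+1+1).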
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
        n = int(input('''Enter a number: ''').strip())
print(partition(n))
except ValueError:
print('''Please enter a number.''')
else:
try:
        n = int(sys.argv[1])
print(partition(n))
except ValueError:
print('''Please pass a number.''')
| 301
| 1
|
"""simple docstring"""
test_graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def bfs(graph, s, t, parent):
    # Return True if there is an augmenting path from source ``s`` to sink ``t``.
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def mincut(graph, source, sink):
    """Run Edmonds-Karp (BFS augmenting paths) and return the saturated edges of the cut."""
    parent = [-1] * (len(graph))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original capacities (copy).
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the augmenting path.
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res
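# Illustrative check (not part of the original file): for ``test_graph`` above, the classic
# six-node max-flow example, mincut(test_graph, source=0, sink=5) returns
# [(1, 3), (4, 3), (4, 5)], the saturated cut edges whose capacities 12 + 7 + 4 = 23 equal
# the maximum flow.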
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
| 301
|
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
SCREAMING_SNAKE_CASE_ = '''bart'''
SCREAMING_SNAKE_CASE_ = True
@st.cache(allow_output_mutation=_lowerCAmelCase )
def lowercase ():
if LOAD_DENSE_INDEX:
__lowerCAmelCase = AutoTokenizer.from_pretrained("""yjernite/retribert-base-uncased""" )
__lowerCAmelCase = AutoModel.from_pretrained("""yjernite/retribert-base-uncased""" ).to("""cuda:0""" )
__lowerCAmelCase = qar_model.eval()
else:
__lowerCAmelCase , __lowerCAmelCase = (None, None)
if MODEL_TYPE == "bart":
__lowerCAmelCase = AutoTokenizer.from_pretrained("""yjernite/bart_eli5""" )
__lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained("""yjernite/bart_eli5""" ).to("""cuda:0""" )
__lowerCAmelCase = torch.load("""seq2seq_models/eli5_bart_model_blm_2.pth""" )
sas_model.load_state_dict(save_dict["""model"""] )
__lowerCAmelCase = sas_model.eval()
else:
__lowerCAmelCase , __lowerCAmelCase = make_qa_sas_model(
model_name="""t5-small""" , from_file="""seq2seq_models/eli5_t5_model_1024_4.pth""" , device="""cuda:0""" )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=_lowerCAmelCase )
def lowercase ():
if LOAD_DENSE_INDEX:
__lowerCAmelCase = faiss.StandardGpuResources()
__lowerCAmelCase = datasets.load_dataset(path="""wiki_snippets""" , name="""wiki40b_en_100_0""" )["""train"""]
__lowerCAmelCase = np.memmap(
"""wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat""" , dtype="""float32""" , mode="""r""" , shape=(wikiaab_passages.num_rows, 128) , )
__lowerCAmelCase = faiss.IndexFlatIP(128 )
__lowerCAmelCase = faiss.index_cpu_to_gpu(_lowerCAmelCase , 1 , _lowerCAmelCase )
wikiaab_gpu_index_flat.add(_lowerCAmelCase ) # TODO fix for larger GPU
else:
__lowerCAmelCase , __lowerCAmelCase = (None, None)
__lowerCAmelCase = Elasticsearch([{"""host""": """localhost""", """port""": """9200"""}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=_lowerCAmelCase )
def lowercase ():
__lowerCAmelCase = datasets.load_dataset("""eli5""" , name="""LFQA_reddit""" )
__lowerCAmelCase = elia["""train_eli5"""]
__lowerCAmelCase = np.memmap(
"""eli5_questions_reps.dat""" , dtype="""float32""" , mode="""r""" , shape=(elia_train.num_rows, 128) )
__lowerCAmelCase = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(_lowerCAmelCase )
return (elia_train, eli5_train_q_index)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = load_indexes()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = load_models()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = load_train_data()
def lowercase (_lowerCAmelCase , _lowerCAmelCase=10 ):
__lowerCAmelCase = embed_questions_for_retrieval([question] , _lowerCAmelCase , _lowerCAmelCase )
__lowerCAmelCase , __lowerCAmelCase = eli5_train_q_index.search(_lowerCAmelCase , _lowerCAmelCase )
__lowerCAmelCase = [elia_train[int(_lowerCAmelCase )] for i in I[0]]
return nn_examples
def lowercase (_lowerCAmelCase , _lowerCAmelCase="wiki40b" , _lowerCAmelCase="dense" , _lowerCAmelCase=10 ):
if source == "none":
__lowerCAmelCase , __lowerCAmelCase = (""" <P> """.join(["""""" for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
__lowerCAmelCase , __lowerCAmelCase = query_qa_dense_index(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
else:
__lowerCAmelCase , __lowerCAmelCase = query_es_index(
_lowerCAmelCase , _lowerCAmelCase , index_name="""english_wiki40b_snippets_100w""" , n_results=_lowerCAmelCase , )
__lowerCAmelCase = [
(res["""article_title"""], res["""section_title"""].strip(), res["""score"""], res["""passage_text"""]) for res in hit_lst
]
__lowerCAmelCase = """question: {} context: {}""".format(_lowerCAmelCase , _lowerCAmelCase )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda _lowerCAmelCase : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _lowerCAmelCase : None),
} )
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=64 , _lowerCAmelCase=256 , _lowerCAmelCase=False , _lowerCAmelCase=2 , _lowerCAmelCase=0.95 , _lowerCAmelCase=0.8 ):
with torch.no_grad():
__lowerCAmelCase = qa_sas_generate(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , num_answers=1 , num_beams=_lowerCAmelCase , min_len=_lowerCAmelCase , max_len=_lowerCAmelCase , do_sample=_lowerCAmelCase , temp=_lowerCAmelCase , top_p=_lowerCAmelCase , top_k=_lowerCAmelCase , max_input_length=1024 , device="""cuda:0""" , )[0]
return (answer, support_list)
st.title('''Long Form Question Answering with ELI5''')
# Start sidebar
SCREAMING_SNAKE_CASE_ = '''<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'''
SCREAMING_SNAKE_CASE_ = '''
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class="img-container"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
''' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
SCREAMING_SNAKE_CASE_ = '''
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
'''
st.sidebar.markdown(description, unsafe_allow_html=True)
SCREAMING_SNAKE_CASE_ = [
'''Answer the question''',
'''View the retrieved document only''',
'''View the most similar ELI5 question and answer''',
'''Show me everything, please!''',
]
SCREAMING_SNAKE_CASE_ = st.sidebar.checkbox('''Demo options''')
if demo_options:
SCREAMING_SNAKE_CASE_ = st.sidebar.selectbox(
'''''',
action_list,
index=3,
)
SCREAMING_SNAKE_CASE_ = action_list.index(action_st)
SCREAMING_SNAKE_CASE_ = st.sidebar.selectbox(
'''''',
['''Show full text of passages''', '''Show passage section titles'''],
index=0,
)
SCREAMING_SNAKE_CASE_ = show_type == '''Show full text of passages'''
else:
SCREAMING_SNAKE_CASE_ = 3
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = st.sidebar.checkbox('''Retrieval options''')
if retrieval_options:
SCREAMING_SNAKE_CASE_ = '''
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between question and passage embeddings
trained on the [ELI5](https://arxiv.org/abs/1907.09190) question-answer pairs.
The answer is then generated by a sequence-to-sequence model which takes the question and the retrieved document as input.
'''
st.sidebar.markdown(retriever_info)
SCREAMING_SNAKE_CASE_ = st.sidebar.selectbox('''Which Wikipedia format should the model use?''', ['''wiki40b''', '''none'''])
SCREAMING_SNAKE_CASE_ = st.sidebar.selectbox('''Which Wikipedia indexer should the model use?''', ['''dense''', '''sparse''', '''mixed'''])
else:
SCREAMING_SNAKE_CASE_ = '''wiki40b'''
SCREAMING_SNAKE_CASE_ = '''dense'''
SCREAMING_SNAKE_CASE_ = '''beam'''
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = 64
SCREAMING_SNAKE_CASE_ = 256
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = st.sidebar.checkbox('''Generation options''')
if generate_options:
SCREAMING_SNAKE_CASE_ = '''
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder\'s output probabilities.
'''
st.sidebar.markdown(generate_info)
SCREAMING_SNAKE_CASE_ = st.sidebar.selectbox('''Would you like to use beam search or sample an answer?''', ['''beam''', '''sampled'''])
SCREAMING_SNAKE_CASE_ = st.sidebar.slider(
'''Minimum generation length''', min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
SCREAMING_SNAKE_CASE_ = st.sidebar.slider(
'''Maximum generation length''', min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
SCREAMING_SNAKE_CASE_ = st.sidebar.slider('''Beam size''', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
SCREAMING_SNAKE_CASE_ = st.sidebar.slider(
'''Nucleus sampling p''', min_value=0.1, max_value=1.0, value=0.9_5, step=0.0_1, format=None, key=None
)
SCREAMING_SNAKE_CASE_ = st.sidebar.slider(
'''Temperature''', min_value=0.1, max_value=1.0, value=0.7, step=0.0_1, format=None, key=None
)
SCREAMING_SNAKE_CASE_ = None
# start main text
SCREAMING_SNAKE_CASE_ = [
'''<MY QUESTION>''',
'''How do people make chocolate?''',
'''Why do we get a fever when we are sick?''',
'''How can different animals perceive different colors?''',
'''What is natural language processing?''',
'''What\'s the best way to treat a sunburn?''',
'''What exactly are vitamins ?''',
'''How does nuclear energy provide electricity?''',
'''What\'s the difference between viruses and bacteria?''',
'''Why are flutes classified as woodwinds when most of them are made out of metal ?''',
'''Why do people like drinking coffee even though it tastes so bad?''',
'''What happens when wine ages? How does it make the wine taste better?''',
'''If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?''',
'''How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?''',
'''How does New Zealand have so many large bird predators?''',
]
SCREAMING_SNAKE_CASE_ = st.selectbox(
'''What would you like to ask? ---- select <MY QUESTION> to enter a new query''',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
SCREAMING_SNAKE_CASE_ = st.text_input('''Enter your question here:''', '''''')
else:
SCREAMING_SNAKE_CASE_ = question_s
if st.button('''Show me!'''):
if action in [0, 1, 3]:
if index_type == "mixed":
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = make_support(question, source=wiki_source, method='''dense''', n_results=10)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = make_support(question, source=wiki_source, method='''sparse''', n_results=10)
SCREAMING_SNAKE_CASE_ = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
SCREAMING_SNAKE_CASE_ = support_list[:10]
SCREAMING_SNAKE_CASE_ = '''<P> ''' + ''' <P> '''.join([res[-1] for res in support_list])
else:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == '''sampled'''),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('''### The model generated answer is:''')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('''--- \n ### The model is drawing information from the following Wikipedia passages:''')
for i, res in enumerate(support_list):
SCREAMING_SNAKE_CASE_ = '''https://en.wikipedia.org/wiki/{}'''.format(res[0].replace(''' ''', '''_'''))
SCREAMING_SNAKE_CASE_ = res[1].strip()
if sec_titles == "":
SCREAMING_SNAKE_CASE_ = '''[{}]({})'''.format(res[0], wiki_url)
else:
SCREAMING_SNAKE_CASE_ = sec_titles.split(''' & ''')
SCREAMING_SNAKE_CASE_ = ''' & '''.join(
['''[{}]({}#{})'''.format(sec.strip(), wiki_url, sec.strip().replace(''' ''', '''_''')) for sec in sec_list]
)
st.markdown(
'''{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'''.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'''> <span style="font-family:arial; font-size:10pt;">''' + res[-1] + '''</span>''', unsafe_allow_html=True
)
if action in [2, 3]:
SCREAMING_SNAKE_CASE_ = find_nearest_training(question)
SCREAMING_SNAKE_CASE_ = nn_train_list[0]
st.markdown(
'''--- \n ### The most similar question in the ELI5 training set was: \n\n {}'''.format(train_exple['''title'''])
)
SCREAMING_SNAKE_CASE_ = [
'''{}. {}'''.format(i + 1, ''' \n'''.join([line.strip() for line in ans.split('''\n''') if line.strip() != '''''']))
for i, (ans, sc) in enumerate(zip(train_exple['''answers''']['''text'''], train_exple['''answers''']['''score''']))
if i == 0 or sc > 2
]
st.markdown('''##### Its answers were: \n\n {}'''.format('''\n'''.join(answers_st)))
SCREAMING_SNAKE_CASE_ = '''
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
'''
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 301
| 1
|
"""simple docstring"""
from math import isclose, sqrt
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = point_y / 4 / point_x
__lowerCAmelCase = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
__lowerCAmelCase = (1 - normal_gradient * normal_gradient) / (
1 + normal_gradient * normal_gradient
)
__lowerCAmelCase = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
    # to find the next point, solve the simultaneous equations:
# y^2 + 4x^2 = 100
# y - b = m * (x - a)
# ==> A x^2 + B x + C = 0
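    # Substituting y = point_y + m * (x - point_x) with m = outgoing_gradient into y^2 + 4x^2 = 100 gives
    #   A = m^2 + 4,  B = 2 * m * (point_y - m * point_x),  C = (point_y - m * point_x)^2 - 100,
    # which is exactly what the three assignments below compute.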
__lowerCAmelCase = outgoing_gradient**2 + 4
__lowerCAmelCase = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
__lowerCAmelCase = (point_y - outgoing_gradient * point_x) ** 2 - 100
__lowerCAmelCase = (
-linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
__lowerCAmelCase = (
-linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
# two solutions, one of which is our input point
__lowerCAmelCase = x_minus if isclose(_lowerCAmelCase , _lowerCAmelCase ) else x_plus
__lowerCAmelCase = point_y + outgoing_gradient * (next_x - point_x)
return next_x, next_y, outgoing_gradient
def lowercase (_lowerCAmelCase = 1.4 , _lowerCAmelCase = -9.6 ):
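    # Count reflections until the beam escapes through the gap at the top of the ellipse
    # (-0.01 <= x <= 0.01 with y > 0). The first beam travels from (0.0, 10.1) to the given
    # starting point, which fixes the initial incoming gradient below.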
__lowerCAmelCase = 0
__lowerCAmelCase = first_x_coord
__lowerCAmelCase = first_y_coord
__lowerCAmelCase = (10.1 - point_y) / (0.0 - point_x)
while not (-0.01 <= point_x <= 0.01 and point_y > 0):
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = next_point(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
num_reflections += 1
return num_reflections
if __name__ == "__main__":
print(F"{solution() = }")
| 301
|
"""simple docstring"""
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
SCREAMING_SNAKE_CASE_ = getLogger(__name__)
SCREAMING_SNAKE_CASE_ = '''cuda''' if torch.cuda.is_available() else '''cpu'''
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 8 , _lowerCAmelCase = DEFAULT_DEVICE , _lowerCAmelCase=False , _lowerCAmelCase="summarization" , _lowerCAmelCase=None , **_lowerCAmelCase , ):
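    # Load a seq2seq model and tokenizer, run generation over the examples in chunks,
    # write one decoded hypothesis per line to the output file, and return simple runtime statistics.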
__lowerCAmelCase = Path(_lowerCAmelCase ).open("""w""" , encoding="""utf-8""" )
__lowerCAmelCase = str(_lowerCAmelCase )
__lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(_lowerCAmelCase ).to(_lowerCAmelCase )
if fpaa:
__lowerCAmelCase = model.half()
__lowerCAmelCase = AutoTokenizer.from_pretrained(_lowerCAmelCase )
logger.info(f"""Inferred tokenizer type: {tokenizer.__class__}""" ) # if this is wrong, check config.model_type.
__lowerCAmelCase = time.time()
# update config with task specific params
use_task_specific_params(_lowerCAmelCase , _lowerCAmelCase )
if prefix is None:
__lowerCAmelCase = prefix or getattr(model.config , """prefix""" , """""" ) or """"""
for examples_chunk in tqdm(list(chunks(_lowerCAmelCase , _lowerCAmelCase ) ) ):
__lowerCAmelCase = [prefix + text for text in examples_chunk]
__lowerCAmelCase = tokenizer(_lowerCAmelCase , return_tensors="""pt""" , truncation=_lowerCAmelCase , padding="""longest""" ).to(_lowerCAmelCase )
__lowerCAmelCase = model.generate(
input_ids=batch.input_ids , attention_mask=batch.attention_mask , **_lowerCAmelCase , )
__lowerCAmelCase = tokenizer.batch_decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase , clean_up_tokenization_spaces=_lowerCAmelCase )
for hypothesis in dec:
fout.write(hypothesis + """\n""" )
fout.flush()
fout.close()
__lowerCAmelCase = int(time.time() - start_time ) # seconds
__lowerCAmelCase = len(_lowerCAmelCase )
return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )}
def lowercase ():
return datetime.datetime.now().strftime("""%Y-%m-%d %H:%M:%S""" )
def lowercase (_lowerCAmelCase=True ):
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("""model_name""" , type=_lowerCAmelCase , help="""like facebook/bart-large-cnn,t5-base, etc.""" )
parser.add_argument("""input_path""" , type=_lowerCAmelCase , help="""like cnn_dm/test.source""" )
parser.add_argument("""save_path""" , type=_lowerCAmelCase , help="""where to save summaries""" )
parser.add_argument("""--reference_path""" , type=_lowerCAmelCase , required=_lowerCAmelCase , help="""like cnn_dm/test.target""" )
parser.add_argument("""--score_path""" , type=_lowerCAmelCase , required=_lowerCAmelCase , default="""metrics.json""" , help="""where to save metrics""" )
parser.add_argument("""--device""" , type=_lowerCAmelCase , required=_lowerCAmelCase , default=_lowerCAmelCase , help="""cuda, cuda:1, cpu etc.""" )
parser.add_argument(
"""--prefix""" , type=_lowerCAmelCase , required=_lowerCAmelCase , default=_lowerCAmelCase , help="""will be added to the begininng of src examples""" )
parser.add_argument("""--task""" , type=_lowerCAmelCase , default="""summarization""" , help="""used for task_specific_params + metrics""" )
parser.add_argument("""--bs""" , type=_lowerCAmelCase , default=8 , required=_lowerCAmelCase , help="""batch size""" )
parser.add_argument(
"""--n_obs""" , type=_lowerCAmelCase , default=-1 , required=_lowerCAmelCase , help="""How many observations. Defaults to all.""" )
parser.add_argument("""--fp16""" , action="""store_true""" )
parser.add_argument("""--dump-args""" , action="""store_true""" , help="""print the custom hparams with the results""" )
parser.add_argument(
"""--info""" , nargs="""?""" , type=_lowerCAmelCase , const=datetime_now() , help=(
"""use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."""
""" lang=en-ru. If no value is passed, the current datetime string will be used."""
) , )
# Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
__lowerCAmelCase , __lowerCAmelCase = parser.parse_known_args()
__lowerCAmelCase = parse_numeric_n_bool_cl_kwargs(_lowerCAmelCase )
if parsed_args and verbose:
print(f"""parsed the following generate kwargs: {parsed_args}""" )
__lowerCAmelCase = [""" """ + x.rstrip() if """t5""" in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
if args.n_obs > 0:
__lowerCAmelCase = examples[: args.n_obs]
Path(args.save_path ).parent.mkdir(exist_ok=_lowerCAmelCase )
if args.reference_path is None and Path(args.score_path ).exists():
warnings.warn(f"""score_path {args.score_path} will be overwritten unless you type ctrl-c.""" )
if args.device == "cpu" and args.fpaa:
# this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
raise ValueError("""Can't mix --fp16 and --device cpu""" )
__lowerCAmelCase = generate_summaries_or_translations(
_lowerCAmelCase , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fpaa , task=args.task , prefix=args.prefix , **_lowerCAmelCase , )
if args.reference_path is None:
return {}
# Compute scores
__lowerCAmelCase = calculate_bleu if """translation""" in args.task else calculate_rouge
__lowerCAmelCase = [x.rstrip() for x in open(args.save_path ).readlines()]
__lowerCAmelCase = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(_lowerCAmelCase )]
__lowerCAmelCase = score_fn(_lowerCAmelCase , _lowerCAmelCase )
scores.update(_lowerCAmelCase )
if args.dump_args:
scores.update(_lowerCAmelCase )
if args.info:
__lowerCAmelCase = args.info
if verbose:
print(_lowerCAmelCase )
if args.score_path is not None:
json.dump(_lowerCAmelCase , open(args.score_path , """w""" ) )
return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
| 301
| 1
|
"""simple docstring"""
def lowercase (_lowerCAmelCase ):
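    # Build the harmonic series as strings, e.g. harmonic_series(3) -> ['1', '1/2', '1/3'].
    # An empty input returns an empty list.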
if n_term == "":
return []
__lowerCAmelCase = []
for temp in range(int(_lowerCAmelCase ) ):
series.append(f"""1/{temp + 1}""" if series else """1""" )
return series
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = input('''Enter the last number (nth term) of the Harmonic Series''')
print('''Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n''')
print(harmonic_series(nth_term))
| 301
|
"""simple docstring"""
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def lowercase (_lowerCAmelCase , _lowerCAmelCase="shi-labs/oneformer_demo" ):
with open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) as f:
__lowerCAmelCase = json.load(_lowerCAmelCase )
__lowerCAmelCase = {}
__lowerCAmelCase = []
__lowerCAmelCase = []
for key, info in class_info.items():
__lowerCAmelCase = info["""name"""]
class_names.append(info["""name"""] )
if info["isthing"]:
thing_ids.append(int(_lowerCAmelCase ) )
__lowerCAmelCase = thing_ids
__lowerCAmelCase = class_names
return metadata
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_=7 , snake_case_=3 , snake_case_=30 , snake_case_=400 , snake_case_=None , snake_case_=True , snake_case_=True , snake_case_=[0.5, 0.5, 0.5] , snake_case_=[0.5, 0.5, 0.5] , snake_case_=10 , snake_case_=False , snake_case_=255 , snake_case_="shi-labs/oneformer_demo" , snake_case_="ade20k_panoptic.json" , snake_case_=10 , ) -> Union[str, Any]:
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = min_resolution
__lowerCAmelCase = max_resolution
__lowerCAmelCase = do_resize
__lowerCAmelCase = {"""shortest_edge""": 32, """longest_edge""": 1_333} if size is None else size
__lowerCAmelCase = do_normalize
__lowerCAmelCase = image_mean
__lowerCAmelCase = image_std
__lowerCAmelCase = class_info_file
__lowerCAmelCase = prepare_metadata(snake_case_ , snake_case_ )
__lowerCAmelCase = num_text
__lowerCAmelCase = repo_path
# for the post_process_functions
__lowerCAmelCase = 2
__lowerCAmelCase = 10
__lowerCAmelCase = 10
__lowerCAmelCase = 3
__lowerCAmelCase = 4
__lowerCAmelCase = num_labels
__lowerCAmelCase = do_reduce_labels
__lowerCAmelCase = ignore_index
def A__ ( self ) -> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def A__ ( self , snake_case_ , snake_case_=False ) -> Dict:
if not batched:
__lowerCAmelCase = image_inputs[0]
if isinstance(snake_case_ , Image.Image ):
__lowerCAmelCase , __lowerCAmelCase = image.size
else:
__lowerCAmelCase , __lowerCAmelCase = image.shape[1], image.shape[2]
if w < h:
__lowerCAmelCase = int(self.size["""shortest_edge"""] * h / w )
__lowerCAmelCase = self.size["""shortest_edge"""]
elif w > h:
__lowerCAmelCase = self.size["""shortest_edge"""]
__lowerCAmelCase = int(self.size["""shortest_edge"""] * w / h )
else:
__lowerCAmelCase = self.size["""shortest_edge"""]
__lowerCAmelCase = self.size["""shortest_edge"""]
else:
__lowerCAmelCase = []
for image in image_inputs:
__lowerCAmelCase , __lowerCAmelCase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__lowerCAmelCase = max(snake_case_ , key=lambda snake_case_ : item[0] )[0]
__lowerCAmelCase = max(snake_case_ , key=lambda snake_case_ : item[1] )[1]
return expected_height, expected_width
def A__ ( self ) -> Tuple:
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class lowerCAmelCase_ ( A__ , unittest.TestCase ):
'''simple docstring'''
_snake_case = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
_snake_case = image_processing_class
def A__ ( self ) -> str:
__lowerCAmelCase = OneFormerImageProcessorTester(self )
@property
def A__ ( self ) -> Dict:
return self.image_processing_tester.prepare_image_processor_dict()
def A__ ( self ) -> Union[str, Any]:
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case_ , """image_mean""" ) )
self.assertTrue(hasattr(snake_case_ , """image_std""" ) )
self.assertTrue(hasattr(snake_case_ , """do_normalize""" ) )
self.assertTrue(hasattr(snake_case_ , """do_resize""" ) )
self.assertTrue(hasattr(snake_case_ , """size""" ) )
self.assertTrue(hasattr(snake_case_ , """ignore_index""" ) )
self.assertTrue(hasattr(snake_case_ , """class_info_file""" ) )
self.assertTrue(hasattr(snake_case_ , """num_text""" ) )
self.assertTrue(hasattr(snake_case_ , """repo_path""" ) )
self.assertTrue(hasattr(snake_case_ , """metadata""" ) )
self.assertTrue(hasattr(snake_case_ , """do_reduce_labels""" ) )
def A__ ( self ) -> List[str]:
pass
def A__ ( self ) -> Union[str, Any]:
# Initialize image_processor
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCAmelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , Image.Image )
# Test not batched input
__lowerCAmelCase = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
__lowerCAmelCase , __lowerCAmelCase = self.image_processing_tester.get_expected_values(snake_case_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCAmelCase , __lowerCAmelCase = self.image_processing_tester.get_expected_values(snake_case_ , batched=snake_case_ )
__lowerCAmelCase = image_processor(
snake_case_ , ["""semantic"""] * len(snake_case_ ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A__ ( self ) -> List[str]:
# Initialize image_processor
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowerCAmelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case_ , numpify=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , np.ndarray )
# Test not batched input
__lowerCAmelCase = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
__lowerCAmelCase , __lowerCAmelCase = self.image_processing_tester.get_expected_values(snake_case_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCAmelCase , __lowerCAmelCase = self.image_processing_tester.get_expected_values(snake_case_ , batched=snake_case_ )
__lowerCAmelCase = image_processor(
snake_case_ , ["""semantic"""] * len(snake_case_ ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A__ ( self ) -> Tuple:
# Initialize image_processor
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowerCAmelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case_ , torchify=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , torch.Tensor )
# Test not batched input
__lowerCAmelCase = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
__lowerCAmelCase , __lowerCAmelCase = self.image_processing_tester.get_expected_values(snake_case_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCAmelCase , __lowerCAmelCase = self.image_processing_tester.get_expected_values(snake_case_ , batched=snake_case_ )
__lowerCAmelCase = image_processor(
snake_case_ , ["""semantic"""] * len(snake_case_ ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A__ ( self , snake_case_=False , snake_case_=False , snake_case_="np" ) -> Optional[Any]:
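        # Build image-processor inputs, optionally attaching random segmentation maps (as numpy arrays
        # or PIL images) and an instance-id -> semantic-id mapping.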
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
__lowerCAmelCase = self.image_processing_tester.num_labels
__lowerCAmelCase = None
__lowerCAmelCase = None
__lowerCAmelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case_ )
if with_segmentation_maps:
__lowerCAmelCase = num_labels
if is_instance_map:
__lowerCAmelCase = list(range(snake_case_ ) ) * 2
__lowerCAmelCase = dict(enumerate(snake_case_ ) )
__lowerCAmelCase = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
__lowerCAmelCase = [Image.fromarray(snake_case_ ) for annotation in annotations]
__lowerCAmelCase = image_processor(
snake_case_ , ["""semantic"""] * len(snake_case_ ) , snake_case_ , return_tensors="""pt""" , instance_id_to_semantic_id=snake_case_ , pad_and_return_pixel_mask=snake_case_ , )
return inputs
def A__ ( self ) -> List[str]:
pass
def A__ ( self ) -> Optional[Any]:
def common(snake_case_=False , snake_case_=None ):
__lowerCAmelCase = self.comm_get_image_processor_inputs(
with_segmentation_maps=snake_case_ , is_instance_map=snake_case_ , segmentation_type=snake_case_ )
__lowerCAmelCase = inputs["""mask_labels"""]
__lowerCAmelCase = inputs["""class_labels"""]
__lowerCAmelCase = inputs["""pixel_values"""]
__lowerCAmelCase = inputs["""text_inputs"""]
# check the batch_size
for mask_label, class_label, text_input in zip(snake_case_ , snake_case_ , snake_case_ ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
            # this ensures padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(snake_case_ ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=snake_case_ )
common(is_instance_map=snake_case_ , segmentation_type="""pil""" )
common(is_instance_map=snake_case_ , segmentation_type="""pil""" )
def A__ ( self ) -> Optional[int]:
__lowerCAmelCase = np.zeros((20, 50) )
__lowerCAmelCase = 1
__lowerCAmelCase = 1
__lowerCAmelCase = 1
__lowerCAmelCase = binary_mask_to_rle(snake_case_ )
self.assertEqual(len(snake_case_ ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
def A__ ( self ) -> Optional[Any]:
__lowerCAmelCase = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
__lowerCAmelCase = self.image_processing_tester.get_fake_oneformer_outputs()
__lowerCAmelCase = fature_extractor.post_process_semantic_segmentation(snake_case_ )
self.assertEqual(len(snake_case_ ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
__lowerCAmelCase = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
__lowerCAmelCase = fature_extractor.post_process_semantic_segmentation(snake_case_ , target_sizes=snake_case_ )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def A__ ( self ) -> Union[str, Any]:
__lowerCAmelCase = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
__lowerCAmelCase = self.image_processing_tester.get_fake_oneformer_outputs()
__lowerCAmelCase = image_processor.post_process_instance_segmentation(snake_case_ , threshold=0 )
self.assertTrue(len(snake_case_ ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("""segmentation""" in el )
self.assertTrue("""segments_info""" in el )
self.assertEqual(type(el["""segments_info"""] ) , snake_case_ )
self.assertEqual(
el["""segmentation"""].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def A__ ( self ) -> Union[str, Any]:
__lowerCAmelCase = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
__lowerCAmelCase = self.image_processing_tester.get_fake_oneformer_outputs()
__lowerCAmelCase = image_processor.post_process_panoptic_segmentation(snake_case_ , threshold=0 )
self.assertTrue(len(snake_case_ ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("""segmentation""" in el )
self.assertTrue("""segments_info""" in el )
self.assertEqual(type(el["""segments_info"""] ) , snake_case_ )
self.assertEqual(
el["""segmentation"""].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
| 301
| 1
|
"""simple docstring"""
from __future__ import annotations
SCREAMING_SNAKE_CASE_ = '''Muhammad Umer Farooq'''
SCREAMING_SNAKE_CASE_ = '''MIT'''
SCREAMING_SNAKE_CASE_ = '''1.0.0'''
SCREAMING_SNAKE_CASE_ = '''Muhammad Umer Farooq'''
SCREAMING_SNAKE_CASE_ = '''contact@muhammadumerfarooq.me'''
SCREAMING_SNAKE_CASE_ = '''Alpha'''
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class lowerCAmelCase_ ( A__ ):
'''simple docstring'''
def __init__( self , snake_case_ ) -> None:
super().__init__()
__lowerCAmelCase = []
__lowerCAmelCase = domain
def A__ ( self , snake_case_ , snake_case_ ) -> None:
# Only parse the 'anchor' tag.
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
                # If href is defined and is not empty or just '#', record it.
if name == "href" and value != "#" and value != "":
# If not already in urls.
if value not in self.urls:
__lowerCAmelCase = parse.urljoin(self.domain , snake_case_ )
self.urls.append(snake_case_ )
def lowercase (_lowerCAmelCase ):
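    # Reduce a URL to its registered domain, e.g. "https://www.github.com/user" -> "github.com".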
return ".".join(get_sub_domain_name(_lowerCAmelCase ).split(""".""" )[-2:] )
def lowercase (_lowerCAmelCase ):
return parse.urlparse(_lowerCAmelCase ).netloc
def lowercase (_lowerCAmelCase = "https://github.com" ):
__lowerCAmelCase = get_domain_name(_lowerCAmelCase )
# Initialize the parser
__lowerCAmelCase = Parser(_lowerCAmelCase )
try:
# Open URL
__lowerCAmelCase = requests.get(_lowerCAmelCase )
# pass the raw HTML to the parser to get links
parser.feed(r.text )
# Get links and loop through
__lowerCAmelCase = set()
for link in parser.urls:
# open URL.
# read = requests.get(link)
try:
__lowerCAmelCase = requests.get(_lowerCAmelCase )
# Get the valid email.
__lowerCAmelCase = re.findall("""[a-zA-Z0-9]+@""" + domain , read.text )
# If not in list then append it.
for email in emails:
valid_emails.add(_lowerCAmelCase )
except ValueError:
pass
except ValueError:
raise SystemExit(1 )
# Finally return a sorted list of email addresses with no duplicates.
return sorted(_lowerCAmelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = emails_from_url('''https://github.com''')
print(F"{len(emails)} emails found:")
print('''\n'''.join(sorted(emails)))
| 301
|
"""simple docstring"""
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Base model mapping
('''albert''', '''FlaxAlbertModel'''),
('''bart''', '''FlaxBartModel'''),
('''beit''', '''FlaxBeitModel'''),
('''bert''', '''FlaxBertModel'''),
('''big_bird''', '''FlaxBigBirdModel'''),
('''blenderbot''', '''FlaxBlenderbotModel'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallModel'''),
('''clip''', '''FlaxCLIPModel'''),
('''distilbert''', '''FlaxDistilBertModel'''),
('''electra''', '''FlaxElectraModel'''),
('''gpt-sw3''', '''FlaxGPT2Model'''),
('''gpt2''', '''FlaxGPT2Model'''),
('''gpt_neo''', '''FlaxGPTNeoModel'''),
('''gptj''', '''FlaxGPTJModel'''),
('''longt5''', '''FlaxLongT5Model'''),
('''marian''', '''FlaxMarianModel'''),
('''mbart''', '''FlaxMBartModel'''),
('''mt5''', '''FlaxMT5Model'''),
('''opt''', '''FlaxOPTModel'''),
('''pegasus''', '''FlaxPegasusModel'''),
('''regnet''', '''FlaxRegNetModel'''),
('''resnet''', '''FlaxResNetModel'''),
('''roberta''', '''FlaxRobertaModel'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormModel'''),
('''roformer''', '''FlaxRoFormerModel'''),
('''t5''', '''FlaxT5Model'''),
('''vision-text-dual-encoder''', '''FlaxVisionTextDualEncoderModel'''),
('''vit''', '''FlaxViTModel'''),
('''wav2vec2''', '''FlaxWav2Vec2Model'''),
('''whisper''', '''FlaxWhisperModel'''),
('''xglm''', '''FlaxXGLMModel'''),
('''xlm-roberta''', '''FlaxXLMRobertaModel'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for pre-training mapping
('''albert''', '''FlaxAlbertForPreTraining'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForPreTraining'''),
('''big_bird''', '''FlaxBigBirdForPreTraining'''),
('''electra''', '''FlaxElectraForPreTraining'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
('''wav2vec2''', '''FlaxWav2Vec2ForPreTraining'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Masked LM mapping
('''albert''', '''FlaxAlbertForMaskedLM'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForMaskedLM'''),
('''big_bird''', '''FlaxBigBirdForMaskedLM'''),
('''distilbert''', '''FlaxDistilBertForMaskedLM'''),
('''electra''', '''FlaxElectraForMaskedLM'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''blenderbot''', '''FlaxBlenderbotForConditionalGeneration'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallForConditionalGeneration'''),
('''encoder-decoder''', '''FlaxEncoderDecoderModel'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''marian''', '''FlaxMarianMTModel'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''pegasus''', '''FlaxPegasusForConditionalGeneration'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
        # Model for Image classification
('''beit''', '''FlaxBeitForImageClassification'''),
('''regnet''', '''FlaxRegNetForImageClassification'''),
('''resnet''', '''FlaxResNetForImageClassification'''),
('''vit''', '''FlaxViTForImageClassification'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
('''vision-encoder-decoder''', '''FlaxVisionEncoderDecoderModel'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Causal LM mapping
('''bart''', '''FlaxBartForCausalLM'''),
('''bert''', '''FlaxBertForCausalLM'''),
('''big_bird''', '''FlaxBigBirdForCausalLM'''),
('''electra''', '''FlaxElectraForCausalLM'''),
('''gpt-sw3''', '''FlaxGPT2LMHeadModel'''),
('''gpt2''', '''FlaxGPT2LMHeadModel'''),
('''gpt_neo''', '''FlaxGPTNeoForCausalLM'''),
('''gptj''', '''FlaxGPTJForCausalLM'''),
('''opt''', '''FlaxOPTForCausalLM'''),
('''roberta''', '''FlaxRobertaForCausalLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForCausalLM'''),
('''xglm''', '''FlaxXGLMForCausalLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForCausalLM'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Sequence Classification mapping
('''albert''', '''FlaxAlbertForSequenceClassification'''),
('''bart''', '''FlaxBartForSequenceClassification'''),
('''bert''', '''FlaxBertForSequenceClassification'''),
('''big_bird''', '''FlaxBigBirdForSequenceClassification'''),
('''distilbert''', '''FlaxDistilBertForSequenceClassification'''),
('''electra''', '''FlaxElectraForSequenceClassification'''),
('''mbart''', '''FlaxMBartForSequenceClassification'''),
('''roberta''', '''FlaxRobertaForSequenceClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForSequenceClassification'''),
('''roformer''', '''FlaxRoFormerForSequenceClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForSequenceClassification'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Question Answering mapping
('''albert''', '''FlaxAlbertForQuestionAnswering'''),
('''bart''', '''FlaxBartForQuestionAnswering'''),
('''bert''', '''FlaxBertForQuestionAnswering'''),
('''big_bird''', '''FlaxBigBirdForQuestionAnswering'''),
('''distilbert''', '''FlaxDistilBertForQuestionAnswering'''),
('''electra''', '''FlaxElectraForQuestionAnswering'''),
('''mbart''', '''FlaxMBartForQuestionAnswering'''),
('''roberta''', '''FlaxRobertaForQuestionAnswering'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForQuestionAnswering'''),
('''roformer''', '''FlaxRoFormerForQuestionAnswering'''),
('''xlm-roberta''', '''FlaxXLMRobertaForQuestionAnswering'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Token Classification mapping
('''albert''', '''FlaxAlbertForTokenClassification'''),
('''bert''', '''FlaxBertForTokenClassification'''),
('''big_bird''', '''FlaxBigBirdForTokenClassification'''),
('''distilbert''', '''FlaxDistilBertForTokenClassification'''),
('''electra''', '''FlaxElectraForTokenClassification'''),
('''roberta''', '''FlaxRobertaForTokenClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForTokenClassification'''),
('''roformer''', '''FlaxRoFormerForTokenClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForTokenClassification'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Multiple Choice mapping
('''albert''', '''FlaxAlbertForMultipleChoice'''),
('''bert''', '''FlaxBertForMultipleChoice'''),
('''big_bird''', '''FlaxBigBirdForMultipleChoice'''),
('''distilbert''', '''FlaxDistilBertForMultipleChoice'''),
('''electra''', '''FlaxElectraForMultipleChoice'''),
('''roberta''', '''FlaxRobertaForMultipleChoice'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMultipleChoice'''),
('''roformer''', '''FlaxRoFormerForMultipleChoice'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMultipleChoice'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
('''bert''', '''FlaxBertForNextSentencePrediction'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
('''speech-encoder-decoder''', '''FlaxSpeechEncoderDecoderModel'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
('''whisper''', '''FlaxWhisperForAudioClassification'''),
]
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModel)
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_PRETRAINING_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForPreTraining, head_doc='''pretraining''')
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForCausalLM, head_doc='''causal language modeling''')
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_MASKED_LM_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='''masked language modeling''')
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc='''sequence-to-sequence language modeling''', checkpoint_for_example='''t5-base'''
)
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc='''sequence classification'''
)
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='''question answering''')
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc='''token classification'''
)
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='''multiple choice''')
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc='''next sentence prediction'''
)
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(
FlaxAutoModelForImageClassification, head_doc='''image classification'''
)
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='''vision-to-text modeling''')
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc='''sequence-to-sequence speech-to-text modeling'''
)
| 301
| 1
|
"""simple docstring"""
import torch
def lowercase ():
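    # Sanity check that reports how many CUDA devices are visible to this process.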
if torch.cuda.is_available():
__lowerCAmelCase = torch.cuda.device_count()
else:
__lowerCAmelCase = 0
print(f"""Successfully ran on {num_gpus} GPUs""" )
if __name__ == "__main__":
main()
| 301
|
"""simple docstring"""
from __future__ import annotations
def lowercase (_lowerCAmelCase , _lowerCAmelCase ):
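    # Return every k-element combination of 1..n, e.g. (n=4, k=2) gives
    # [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]].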
__lowerCAmelCase = []
create_all_state(1 , _lowerCAmelCase , _lowerCAmelCase , [] , _lowerCAmelCase )
return result
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ):
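    # Backtracking helper: `level` counts how many elements are still needed, so when it reaches 0
    # a complete combination is copied into total_list; otherwise current_list is extended with each
    # remaining candidate in turn.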
if level == 0:
total_list.append(current_list[:] )
return
for i in range(_lowerCAmelCase , total_number - level + 2 ):
current_list.append(_lowerCAmelCase )
create_all_state(i + 1 , _lowerCAmelCase , level - 1 , _lowerCAmelCase , _lowerCAmelCase )
current_list.pop()
def lowercase (_lowerCAmelCase ):
for i in total_list:
print(*_lowerCAmelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = generate_all_combinations(n, k)
print_all_state(total_list)
| 301
| 1
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
SCREAMING_SNAKE_CASE_ = False
@skip_mps
class lowerCAmelCase_ ( A__ , A__ , A__ , unittest.TestCase ):
'''simple docstring'''
_snake_case = StableDiffusionAttendAndExcitePipeline
_snake_case = False
_snake_case = TEXT_TO_IMAGE_PARAMS
_snake_case = TEXT_TO_IMAGE_BATCH_PARAMS.union({'''token_indices'''} )
_snake_case = TEXT_TO_IMAGE_IMAGE_PARAMS
_snake_case = TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def A__ ( cls ) -> Tuple:
super().setUpClass()
torch.use_deterministic_algorithms(snake_case_ )
@classmethod
def A__ ( cls ) -> List[Any]:
super().tearDownClass()
torch.use_deterministic_algorithms(snake_case_ )
def A__ ( self ) -> Any:
torch.manual_seed(0 )
__lowerCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=snake_case_ , )
__lowerCAmelCase = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=snake_case_ , set_alpha_to_one=snake_case_ , )
torch.manual_seed(0 )
__lowerCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
__lowerCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=512 , )
__lowerCAmelCase = CLIPTextModel(snake_case_ )
__lowerCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__lowerCAmelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def A__ ( self , snake_case_ , snake_case_=0 ) -> Tuple:
if str(snake_case_ ).startswith("""mps""" ):
__lowerCAmelCase = torch.manual_seed(snake_case_ )
else:
__lowerCAmelCase = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ )
__lowerCAmelCase = __lowerCAmelCase = {
"""prompt""": """a cat and a frog""",
"""token_indices""": [2, 5],
"""generator""": generator,
"""num_inference_steps""": 1,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""max_iter_to_alter""": 2,
"""thresholds""": {0: 0.7},
}
return inputs
def A__ ( self ) -> Optional[Any]:
__lowerCAmelCase = """cpu"""
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = self.pipeline_class(**snake_case_ )
pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
__lowerCAmelCase = self.get_dummy_inputs(snake_case_ )
__lowerCAmelCase = pipe(**snake_case_ ).images
__lowerCAmelCase = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 64, 64, 3) )
__lowerCAmelCase = np.array(
[0.63_905_364, 0.62_897_307, 0.48_599_017, 0.5_133_624, 0.5_550_048, 0.45_769_516, 0.50_326_973, 0.5_023_139, 0.45_384_496] )
__lowerCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(snake_case_ , 1e-3 )
def A__ ( self ) -> str:
super().test_cpu_offload_forward_pass(expected_max_diff=5e-4 )
def A__ ( self ) -> Any:
        # NOTE: Larger batch sizes cause this test to time out; only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def A__ ( self ) -> Tuple:
self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7e-4 )
def A__ ( self ) -> str:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def A__ ( self ) -> Union[str, Any]:
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4 )
def A__ ( self ) -> int:
super().test_save_load_local(expected_max_difference=5e-4 )
def A__ ( self ) -> Union[str, Any]:
super().test_save_load_optional_components(expected_max_difference=4e-4 )
@require_torch_gpu
@slow
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def A__ ( cls ) -> Union[str, Any]:
super().setUpClass()
torch.use_deterministic_algorithms(snake_case_ )
@classmethod
def A__ ( cls ) -> Optional[Any]:
super().tearDownClass()
torch.use_deterministic_algorithms(snake_case_ )
def A__ ( self ) -> Optional[int]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ) -> Dict:
__lowerCAmelCase = torch.manual_seed(51 )
__lowerCAmelCase = StableDiffusionAttendAndExcitePipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , safety_checker=snake_case_ , torch_dtype=torch.floataa )
pipe.to("""cuda""" )
__lowerCAmelCase = """a painting of an elephant with glasses"""
__lowerCAmelCase = [5, 7]
__lowerCAmelCase = pipe(
prompt=snake_case_ , token_indices=snake_case_ , guidance_scale=7.5 , generator=snake_case_ , num_inference_steps=5 , max_iter_to_alter=5 , output_type="""numpy""" , ).images[0]
__lowerCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy""" )
assert np.abs((expected_image - image).max() ) < 5e-1
| 301
|
"""simple docstring"""
import os
from pathlib import Path
def lowercase ():
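    # JIT-compile the MultiScaleDeformableAttention C++/CUDA kernels that ship with Deformable DETR
    # and return the compiled extension module.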
from torch.utils.cpp_extension import load
__lowerCAmelCase = Path(_lowerCAmelCase ).resolve().parent.parent.parent / """kernels""" / """deformable_detr"""
__lowerCAmelCase = [
root / filename
for filename in [
"""vision.cpp""",
os.path.join("""cpu""" , """ms_deform_attn_cpu.cpp""" ),
os.path.join("""cuda""" , """ms_deform_attn_cuda.cu""" ),
]
]
load(
"""MultiScaleDeformableAttention""" , _lowerCAmelCase , with_cuda=_lowerCAmelCase , extra_include_paths=[str(_lowerCAmelCase )] , extra_cflags=["""-DWITH_CUDA=1"""] , extra_cuda_cflags=[
"""-DCUDA_HAS_FP16=1""",
"""-D__CUDA_NO_HALF_OPERATORS__""",
"""-D__CUDA_NO_HALF_CONVERSIONS__""",
"""-D__CUDA_NO_HALF2_OPERATORS__""",
] , )
import MultiScaleDeformableAttention as MSDA
return MSDA
| 301
| 1
|
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def lowercase (_lowerCAmelCase ):
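    # Derive the FocalNet configuration (depths, focal levels/windows, embedding dimension and label
    # mappings) from the checkpoint name, e.g. "tiny"/"small"/"base" vs. "large"/"xlarge"/"huge".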
__lowerCAmelCase = [2, 2, 6, 2] if """tiny""" in model_name else [2, 2, 18, 2]
__lowerCAmelCase = True if """large""" in model_name or """huge""" in model_name else False
__lowerCAmelCase = True if """large""" in model_name or """huge""" in model_name else False
__lowerCAmelCase = True if """large""" in model_name or """huge""" in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
__lowerCAmelCase = [3, 3, 3, 3]
__lowerCAmelCase = [5, 5, 5, 5]
elif "fl4" in model_name:
__lowerCAmelCase = [4, 4, 4, 4]
__lowerCAmelCase = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
__lowerCAmelCase = [3, 3, 3, 3]
if "lrf" in model_name:
__lowerCAmelCase = [3, 3, 3, 3]
else:
__lowerCAmelCase = [2, 2, 2, 2]
if "tiny" in model_name:
__lowerCAmelCase = 96
elif "small" in model_name:
__lowerCAmelCase = 96
elif "base" in model_name:
__lowerCAmelCase = 128
elif "large" in model_name:
__lowerCAmelCase = 192
elif "xlarge" in model_name:
__lowerCAmelCase = 256
elif "huge" in model_name:
__lowerCAmelCase = 352
# set label information
__lowerCAmelCase = """huggingface/label-files"""
if "large" in model_name or "huge" in model_name:
__lowerCAmelCase = """imagenet-22k-id2label.json"""
else:
__lowerCAmelCase = """imagenet-1k-id2label.json"""
__lowerCAmelCase = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) )
__lowerCAmelCase = {int(_lowerCAmelCase ): v for k, v in idalabel.items()}
__lowerCAmelCase = {v: k for k, v in idalabel.items()}
__lowerCAmelCase = FocalNetConfig(
embed_dim=_lowerCAmelCase , depths=_lowerCAmelCase , focal_levels=_lowerCAmelCase , focal_windows=_lowerCAmelCase , use_conv_embed=_lowerCAmelCase , idalabel=_lowerCAmelCase , labelaid=_lowerCAmelCase , use_post_layernorm=_lowerCAmelCase , use_layerscale=_lowerCAmelCase , )
return config
def lowercase (_lowerCAmelCase ):
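    # Map a key from the original FocalNet checkpoint to the corresponding Transformers parameter name.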
if "patch_embed.proj" in name:
__lowerCAmelCase = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
__lowerCAmelCase = name.replace("""patch_embed.norm""" , """embeddings.norm""" )
if "layers" in name:
__lowerCAmelCase = """encoder.""" + name
if "encoder.layers" in name:
__lowerCAmelCase = name.replace("""encoder.layers""" , """encoder.stages""" )
if "downsample.proj" in name:
__lowerCAmelCase = name.replace("""downsample.proj""" , """downsample.projection""" )
if "blocks" in name:
__lowerCAmelCase = name.replace("""blocks""" , """layers""" )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
__lowerCAmelCase = name.replace("""modulation.f""" , """modulation.projection_in""" )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
__lowerCAmelCase = name.replace("""modulation.h""" , """modulation.projection_context""" )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
__lowerCAmelCase = name.replace("""modulation.proj""" , """modulation.projection_out""" )
if name == "norm.weight":
__lowerCAmelCase = """layernorm.weight"""
if name == "norm.bias":
__lowerCAmelCase = """layernorm.bias"""
if "head" in name:
__lowerCAmelCase = name.replace("""head""" , """classifier""" )
else:
__lowerCAmelCase = """focalnet.""" + name
return name
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ):
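    # Download the original checkpoint, rename its keys, load the weights into
    # FocalNetForImageClassification, sanity-check the logits on a COCO test image, and optionally
    # save the model locally and/or push it to the Hub.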
# fmt: off
__lowerCAmelCase = {
"""focalnet-tiny""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth""",
"""focalnet-tiny-lrf""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth""",
"""focalnet-small""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth""",
"""focalnet-small-lrf""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth""",
"""focalnet-base""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth""",
"""focalnet-base-lrf""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth""",
"""focalnet-large-lrf-fl3""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth""",
"""focalnet-large-lrf-fl4""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth""",
"""focalnet-xlarge-lrf-fl3""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth""",
"""focalnet-xlarge-lrf-fl4""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth""",
}
# fmt: on
__lowerCAmelCase = model_name_to_url[model_name]
print("""Checkpoint URL: """ , _lowerCAmelCase )
__lowerCAmelCase = torch.hub.load_state_dict_from_url(_lowerCAmelCase , map_location="""cpu""" )["""model"""]
# rename keys
for key in state_dict.copy().keys():
__lowerCAmelCase = state_dict.pop(_lowerCAmelCase )
__lowerCAmelCase = val
__lowerCAmelCase = get_focalnet_config(_lowerCAmelCase )
__lowerCAmelCase = FocalNetForImageClassification(_lowerCAmelCase )
model.eval()
# load state dict
model.load_state_dict(_lowerCAmelCase )
# verify conversion
__lowerCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__lowerCAmelCase = BitImageProcessor(
do_resize=_lowerCAmelCase , size={"""shortest_edge""": 256} , resample=PILImageResampling.BILINEAR , do_center_crop=_lowerCAmelCase , crop_size=224 , do_normalize=_lowerCAmelCase , image_mean=_lowerCAmelCase , image_std=_lowerCAmelCase , )
__lowerCAmelCase = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
__lowerCAmelCase = processor(images=_lowerCAmelCase , return_tensors="""pt""" )
__lowerCAmelCase = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
__lowerCAmelCase = image_transforms(_lowerCAmelCase ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , _lowerCAmelCase , atol=1E-4 )
__lowerCAmelCase = model(**_lowerCAmelCase )
__lowerCAmelCase = outputs.logits.argmax(-1 ).item()
print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] )
print("""First values of logits:""" , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
__lowerCAmelCase = torch.tensor([0.2_166, -0.4_368, 0.2_191] )
elif model_name == "focalnet-tiny-lrf":
__lowerCAmelCase = torch.tensor([1.1_669, 0.0_125, -0.1_695] )
elif model_name == "focalnet-small":
__lowerCAmelCase = torch.tensor([0.4_917, -0.0_430, 0.1_341] )
elif model_name == "focalnet-small-lrf":
__lowerCAmelCase = torch.tensor([-0.2_588, -0.5_342, -0.2_331] )
elif model_name == "focalnet-base":
__lowerCAmelCase = torch.tensor([-0.1_655, -0.4_090, -0.1_730] )
elif model_name == "focalnet-base-lrf":
__lowerCAmelCase = torch.tensor([0.5_306, -0.0_483, -0.3_928] )
assert torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1E-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowerCAmelCase )
processor.save_pretrained(_lowerCAmelCase )
if push_to_hub:
print(f"""Pushing model and processor of {model_name} to the hub...""" )
model.push_to_hub(f"""{model_name}""" )
processor.push_to_hub(f"""{model_name}""" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''focalnet-tiny''',
type=str,
help='''Name of the FocalNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub.''',
)
SCREAMING_SNAKE_CASE_ = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 301
|
"""simple docstring"""
from __future__ import annotations
from statistics import mean
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = [0] * no_of_processes
__lowerCAmelCase = [0] * no_of_processes
    # Initialize remaining_time to burst_time.
for i in range(_lowerCAmelCase ):
__lowerCAmelCase = burst_time[i]
__lowerCAmelCase = []
__lowerCAmelCase = 0
__lowerCAmelCase = 0
    # While processes remain incomplete:
    # every process whose arrival time has passed and which still has remaining
    # execution time is put into ready_process, and the ready process with the
    # smallest remaining time (target_process) is then run to completion.
while completed != no_of_processes:
__lowerCAmelCase = []
__lowerCAmelCase = -1
for i in range(_lowerCAmelCase ):
if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
ready_process.append(_lowerCAmelCase )
if len(_lowerCAmelCase ) > 0:
__lowerCAmelCase = ready_process[0]
for i in ready_process:
if remaining_time[i] < remaining_time[target_process]:
__lowerCAmelCase = i
total_time += burst_time[target_process]
completed += 1
__lowerCAmelCase = 0
__lowerCAmelCase = (
total_time - arrival_time[target_process] - burst_time[target_process]
)
else:
total_time += 1
return waiting_time
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
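    # Turnaround time = waiting time + burst time for each process.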
__lowerCAmelCase = [0] * no_of_processes
for i in range(_lowerCAmelCase ):
__lowerCAmelCase = burst_time[i] + waiting_time[i]
return turn_around_time
if __name__ == "__main__":
print('''[TEST CASE 01]''')
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = [2, 5, 3, 7]
SCREAMING_SNAKE_CASE_ = [0, 0, 0, 0]
SCREAMING_SNAKE_CASE_ = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
SCREAMING_SNAKE_CASE_ = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print('''PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time''')
for i, process_id in enumerate(list(range(1, 5))):
print(
F"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"
F"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"
)
print(F"\nAverage waiting time = {mean(waiting_time):.5f}")
print(F"Average turnaround time = {mean(turn_around_time):.5f}")
| 301
| 1
|
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class lowerCAmelCase_ :
'''simple docstring'''
@staticmethod
def A__ ( *snake_case_ , **snake_case_ ) -> Dict:
pass
def lowercase (_lowerCAmelCase ):
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
SCREAMING_SNAKE_CASE_ = (
'''https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'''
)
@is_pipeline_test
@require_torch
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
_snake_case = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def A__ ( self , snake_case_ , snake_case_ , snake_case_ ) -> int:
__lowerCAmelCase = pipeline(
"""document-question-answering""" , model=snake_case_ , tokenizer=snake_case_ , image_processor=snake_case_ )
__lowerCAmelCase = INVOICE_URL
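        # apply_tesseract runs OCR on the image and returns the words plus their normalized
        # boxes; zipping them yields the (word, box) pairs expected by the `word_boxes` input.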
__lowerCAmelCase = list(zip(*apply_tesseract(load_image(snake_case_ ) , snake_case_ , """""" ) ) )
__lowerCAmelCase = """What is the placebo?"""
__lowerCAmelCase = [
{
"""image""": load_image(snake_case_ ),
"""question""": question,
},
{
"""image""": image,
"""question""": question,
},
{
"""image""": image,
"""question""": question,
"""word_boxes""": word_boxes,
},
]
return dqa_pipeline, examples
def A__ ( self , snake_case_ , snake_case_ ) -> Union[str, Any]:
__lowerCAmelCase = dqa_pipeline(snake_case_ , top_k=2 )
self.assertEqual(
snake_case_ , [
[
{"""score""": ANY(snake_case_ ), """answer""": ANY(snake_case_ ), """start""": ANY(snake_case_ ), """end""": ANY(snake_case_ )},
{"""score""": ANY(snake_case_ ), """answer""": ANY(snake_case_ ), """start""": ANY(snake_case_ ), """end""": ANY(snake_case_ )},
]
]
* 3 , )
@require_torch
@require_detectrona
@require_pytesseract
def A__ ( self ) -> str:
__lowerCAmelCase = pipeline("""document-question-answering""" , model="""hf-internal-testing/tiny-random-layoutlmv2""" )
__lowerCAmelCase = INVOICE_URL
__lowerCAmelCase = """How many cats are there?"""
__lowerCAmelCase = [
{"""score""": 0.0_001, """answer""": """oy 2312/2019""", """start""": 38, """end""": 39},
{"""score""": 0.0_001, """answer""": """oy 2312/2019 DUE""", """start""": 38, """end""": 40},
]
__lowerCAmelCase = dqa_pipeline(image=snake_case_ , question=snake_case_ , top_k=2 )
self.assertEqual(nested_simplify(snake_case_ , decimals=4 ) , snake_case_ )
__lowerCAmelCase = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(nested_simplify(snake_case_ , decimals=4 ) , snake_case_ )
        # No text is detected in this image, so layoutlmv2 has nothing to answer from
        # and we expect an empty result.
__lowerCAmelCase = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
__lowerCAmelCase = dqa_pipeline(image=snake_case_ , question=snake_case_ , top_k=2 )
self.assertEqual(snake_case_ , [] )
        # We can optionally pass the words and bounding boxes directly
__lowerCAmelCase = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
__lowerCAmelCase = []
__lowerCAmelCase = []
__lowerCAmelCase = dqa_pipeline(image=snake_case_ , question=snake_case_ , words=snake_case_ , boxes=snake_case_ , top_k=2 )
self.assertEqual(snake_case_ , [] )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def A__ ( self ) -> str:
__lowerCAmelCase = pipeline(
"""document-question-answering""" , model="""tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa""" , revision="""9977165""" , )
__lowerCAmelCase = INVOICE_URL
__lowerCAmelCase = """What is the invoice number?"""
__lowerCAmelCase = dqa_pipeline(image=snake_case_ , question=snake_case_ , top_k=2 )
self.assertEqual(
nested_simplify(snake_case_ , decimals=4 ) , [
{"""score""": 0.9_944, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0_009, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
__lowerCAmelCase = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(snake_case_ , decimals=4 ) , [
{"""score""": 0.9_944, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0_009, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
__lowerCAmelCase = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(snake_case_ , decimals=4 ) , [
[
{"""score""": 0.9_944, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0_009, """answer""": """us-001""", """start""": 16, """end""": 16},
],
]
* 2 , )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def A__ ( self ) -> Union[str, Any]:
__lowerCAmelCase = pipeline(
"""document-question-answering""" , model="""tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa""" , revision="""9977165""" , max_seq_len=50 , )
__lowerCAmelCase = INVOICE_URL
__lowerCAmelCase = """What is the invoice number?"""
__lowerCAmelCase = dqa_pipeline(image=snake_case_ , question=snake_case_ , top_k=2 )
self.assertEqual(
nested_simplify(snake_case_ , decimals=4 ) , [
{"""score""": 0.9_974, """answer""": """1110212019""", """start""": 23, """end""": 23},
{"""score""": 0.9_948, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
__lowerCAmelCase = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(snake_case_ , decimals=4 ) , [
{"""score""": 0.9_974, """answer""": """1110212019""", """start""": 23, """end""": 23},
{"""score""": 0.9_948, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
__lowerCAmelCase = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(snake_case_ , decimals=4 ) , [
[
{"""score""": 0.9_974, """answer""": """1110212019""", """start""": 23, """end""": 23},
{"""score""": 0.9_948, """answer""": """us-001""", """start""": 16, """end""": 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def A__ ( self ) -> int:
__lowerCAmelCase = AutoTokenizer.from_pretrained(
"""impira/layoutlm-document-qa""" , revision="""3dc6de3""" , add_prefix_space=snake_case_ )
__lowerCAmelCase = pipeline(
"""document-question-answering""" , model="""impira/layoutlm-document-qa""" , tokenizer=snake_case_ , revision="""3dc6de3""" , )
__lowerCAmelCase = INVOICE_URL
__lowerCAmelCase = """What is the invoice number?"""
__lowerCAmelCase = dqa_pipeline(image=snake_case_ , question=snake_case_ , top_k=2 )
self.assertEqual(
nested_simplify(snake_case_ , decimals=4 ) , [
{"""score""": 0.4_251, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0_819, """answer""": """1110212019""", """start""": 23, """end""": 23},
] , )
__lowerCAmelCase = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(snake_case_ , decimals=4 ) , [
{"""score""": 0.4_251, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0_819, """answer""": """1110212019""", """start""": 23, """end""": 23},
] , )
__lowerCAmelCase = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(snake_case_ , decimals=4 ) , [
[
{"""score""": 0.4_251, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0_819, """answer""": """1110212019""", """start""": 23, """end""": 23},
]
]
* 2 , )
__lowerCAmelCase = list(zip(*apply_tesseract(load_image(snake_case_ ) , snake_case_ , """""" ) ) )
# This model should also work if `image` is set to None
__lowerCAmelCase = dqa_pipeline({"""image""": None, """word_boxes""": word_boxes, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(snake_case_ , decimals=4 ) , [
{"""score""": 0.4_251, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0_819, """answer""": """1110212019""", """start""": 23, """end""": 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def A__ ( self ) -> str:
__lowerCAmelCase = AutoTokenizer.from_pretrained(
"""impira/layoutlm-document-qa""" , revision="""3dc6de3""" , add_prefix_space=snake_case_ )
__lowerCAmelCase = pipeline(
"""document-question-answering""" , model="""impira/layoutlm-document-qa""" , tokenizer=snake_case_ , revision="""3dc6de3""" , max_seq_len=50 , )
__lowerCAmelCase = INVOICE_URL
__lowerCAmelCase = """What is the invoice number?"""
__lowerCAmelCase = dqa_pipeline(image=snake_case_ , question=snake_case_ , top_k=2 )
self.assertEqual(
nested_simplify(snake_case_ , decimals=4 ) , [
{"""score""": 0.9_999, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.9_998, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
__lowerCAmelCase = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(snake_case_ , decimals=4 ) , [
[
{"""score""": 0.9_999, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.9_998, """answer""": """us-001""", """start""": 16, """end""": 16},
]
]
* 2 , )
__lowerCAmelCase = list(zip(*apply_tesseract(load_image(snake_case_ ) , snake_case_ , """""" ) ) )
# This model should also work if `image` is set to None
__lowerCAmelCase = dqa_pipeline({"""image""": None, """word_boxes""": word_boxes, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(snake_case_ , decimals=4 ) , [
{"""score""": 0.9_999, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.9_998, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
@slow
@require_torch
def A__ ( self ) -> str:
__lowerCAmelCase = pipeline(
"""document-question-answering""" , model="""naver-clova-ix/donut-base-finetuned-docvqa""" , tokenizer=AutoTokenizer.from_pretrained("""naver-clova-ix/donut-base-finetuned-docvqa""" ) , feature_extractor="""naver-clova-ix/donut-base-finetuned-docvqa""" , )
__lowerCAmelCase = INVOICE_URL
__lowerCAmelCase = """What is the invoice number?"""
__lowerCAmelCase = dqa_pipeline(image=snake_case_ , question=snake_case_ , top_k=2 )
self.assertEqual(nested_simplify(snake_case_ , decimals=4 ) , [{"""answer""": """us-001"""}] )
@require_tf
@unittest.skip("""Document question answering not implemented in TF""" )
def A__ ( self ) -> int:
pass
| 301
|
"""simple docstring"""
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE_ = get_tests_dir('''fixtures/spiece.model''')
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( A__ , unittest.TestCase ):
'''simple docstring'''
_snake_case = DebertaVaTokenizer
_snake_case = DebertaVaTokenizerFast
_snake_case = True
_snake_case = True
def A__ ( self ) -> Optional[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , unk_token="""<unk>""" )
tokenizer.save_pretrained(self.tmpdirname )
def A__ ( self , snake_case_ ) -> List[Any]:
__lowerCAmelCase = """this is a test"""
__lowerCAmelCase = """this is a test"""
return input_text, output_text
def A__ ( self ) -> Optional[Any]:
__lowerCAmelCase = """<pad>"""
__lowerCAmelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ )
def A__ ( self ) -> Any:
__lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """[PAD]""" )
self.assertEqual(len(snake_case_ ) , 30_001 )
def A__ ( self ) -> Optional[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 30_000 )
def A__ ( self ) -> int:
# fmt: off
__lowerCAmelCase = """ \tHeLLo!how \n Are yoU? """
__lowerCAmelCase = ["""▁hello""", """!""", """how""", """▁are""", """▁you""", """?"""]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , do_lower_case=snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = DebertaVaTokenizerFast(snake_case_ , do_lower_case=snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
@unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" )
def A__ ( self ) -> int:
pass
@unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" )
def A__ ( self ) -> Dict:
pass
def A__ ( self ) -> List[str]:
# fmt: off
__lowerCAmelCase = """I was born in 92000, and this is falsé."""
__lowerCAmelCase = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = DebertaVaTokenizerFast(snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
def A__ ( self ) -> Dict:
# fmt: off
__lowerCAmelCase = """I was born in 92000, and this is falsé."""
__lowerCAmelCase = ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = DebertaVaTokenizerFast(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
def A__ ( self ) -> Any:
# fmt: off
__lowerCAmelCase = """I was born in 92000, and this is falsé."""
__lowerCAmelCase = ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """.""", ]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = DebertaVaTokenizerFast(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
def A__ ( self ) -> Tuple:
# fmt: off
__lowerCAmelCase = """I was born in 92000, and this is falsé."""
__lowerCAmelCase = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = DebertaVaTokenizerFast(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
def A__ ( self ) -> Any:
# fmt: off
__lowerCAmelCase = """ \tHeLLo!how \n Are yoU? """
__lowerCAmelCase = ["""▁""", """<unk>""", """e""", """<unk>""", """o""", """!""", """how""", """▁""", """<unk>""", """re""", """▁yo""", """<unk>""", """?"""]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = DebertaVaTokenizerFast(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
def A__ ( self ) -> int:
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = self.get_rust_tokenizer()
__lowerCAmelCase = """I was born in 92000, and this is falsé."""
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
__lowerCAmelCase = rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = self.get_rust_tokenizer()
__lowerCAmelCase = tokenizer.encode(snake_case_ )
__lowerCAmelCase = rust_tokenizer.encode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
def A__ ( self ) -> str:
__lowerCAmelCase = """This is a test"""
__lowerCAmelCase = [13, 1, 4_398, 25, 21, 1_289]
__lowerCAmelCase = ["""▁""", """T""", """his""", """▁is""", """▁a""", """▁test"""]
__lowerCAmelCase = ["""▁""", """<unk>""", """his""", """▁is""", """▁a""", """▁test"""]
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , keep_accents=snake_case_ )
__lowerCAmelCase = DebertaVaTokenizerFast(snake_case_ , keep_accents=snake_case_ )
__lowerCAmelCase = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = rust_tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
# fmt: off
__lowerCAmelCase = """I was born in 92000, and this is falsé."""
__lowerCAmelCase = [13, 1, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9]
__lowerCAmelCase = ["""▁""", """I""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """.""", ]
__lowerCAmelCase = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """.""", ]
# fmt: on
__lowerCAmelCase = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = rust_tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
def A__ ( self ) -> Optional[int]:
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ )
__lowerCAmelCase = tokenizer.encode("""sequence builders""" )
__lowerCAmelCase = tokenizer.encode("""multi-sequence build""" )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(snake_case_ )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(snake_case_ , snake_case_ )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , snake_case_ )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , snake_case_ , )
@slow
def A__ ( self ) -> int:
# fmt: off
__lowerCAmelCase = {"""input_ids""": [[1, 39_867, 36, 19_390, 486, 27, 35_052, 81_436, 18, 60_685, 1_225, 7, 35_052, 81_436, 18, 9_367, 16_899, 18, 15_937, 53, 594, 773, 18, 16_287, 30_465, 36, 15_937, 6, 41_139, 38, 36_979, 60_763, 191, 6, 34_132, 99, 6, 50_538, 390, 43_230, 6, 34_132, 2_779, 20_850, 14, 699, 1_072, 1_194, 36, 382, 10_901, 53, 7, 699, 1_072, 2_084, 36, 20_422, 630, 53, 19, 105, 3_049, 1_896, 1_053, 16_899, 1_506, 11, 37_978, 4_243, 7, 1_237, 31_869, 200, 16_566, 654, 6, 35_052, 81_436, 7, 55_630, 13_593, 4, 2], [1, 26, 15_011, 13, 667, 8, 1_053, 18, 23_611, 1_237, 72_356, 12_820, 34, 104_134, 1_209, 35, 13_313, 6_627, 21, 202, 347, 7, 164, 2_399, 11, 46, 4_485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_232, 2_864, 15_785, 14_951, 105, 5, 8_581, 1_250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case_ , model_name="""microsoft/deberta-v2-xlarge""" , revision="""ad6e42c1532ddf3a15c39246b63f5559d558b670""" , )
| 301
| 1
|
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'''microsoft/unispeech-sat-base-100h-libri-ft''': (
'''https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'''
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class lowerCAmelCase_ ( A__ ):
'''simple docstring'''
_snake_case = '''unispeech-sat'''
def __init__( self , snake_case_=32 , snake_case_=768 , snake_case_=12 , snake_case_=12 , snake_case_=3_072 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.0 , snake_case_=0.0 , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.02 , snake_case_=1e-5 , snake_case_="group" , snake_case_="gelu" , snake_case_=(512, 512, 512, 512, 512, 512, 512) , snake_case_=(5, 2, 2, 2, 2, 2, 2) , snake_case_=(10, 3, 3, 3, 3, 2, 2) , snake_case_=False , snake_case_=128 , snake_case_=16 , snake_case_=False , snake_case_=True , snake_case_=0.05 , snake_case_=10 , snake_case_=2 , snake_case_=0.0 , snake_case_=10 , snake_case_=0 , snake_case_=320 , snake_case_=2 , snake_case_=0.1 , snake_case_=100 , snake_case_=256 , snake_case_=256 , snake_case_=0.1 , snake_case_="mean" , snake_case_=False , snake_case_=False , snake_case_=256 , snake_case_=(512, 512, 512, 512, 1_500) , snake_case_=(5, 3, 3, 1, 1) , snake_case_=(1, 2, 3, 1, 1) , snake_case_=512 , snake_case_=0 , snake_case_=1 , snake_case_=2 , snake_case_=504 , **snake_case_ , ) -> List[str]:
super().__init__(**snake_case_ , pad_token_id=snake_case_ , bos_token_id=snake_case_ , eos_token_id=snake_case_ )
__lowerCAmelCase = hidden_size
__lowerCAmelCase = feat_extract_norm
__lowerCAmelCase = feat_extract_activation
__lowerCAmelCase = list(snake_case_ )
__lowerCAmelCase = list(snake_case_ )
__lowerCAmelCase = list(snake_case_ )
__lowerCAmelCase = conv_bias
__lowerCAmelCase = num_conv_pos_embeddings
__lowerCAmelCase = num_conv_pos_embedding_groups
__lowerCAmelCase = len(self.conv_dim )
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = hidden_dropout
__lowerCAmelCase = attention_dropout
__lowerCAmelCase = activation_dropout
__lowerCAmelCase = feat_proj_dropout
__lowerCAmelCase = final_dropout
__lowerCAmelCase = layerdrop
__lowerCAmelCase = layer_norm_eps
__lowerCAmelCase = initializer_range
__lowerCAmelCase = vocab_size
__lowerCAmelCase = num_clusters
__lowerCAmelCase = do_stable_layer_norm
__lowerCAmelCase = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__lowerCAmelCase = apply_spec_augment
__lowerCAmelCase = mask_time_prob
__lowerCAmelCase = mask_time_length
__lowerCAmelCase = mask_time_min_masks
__lowerCAmelCase = mask_feature_prob
__lowerCAmelCase = mask_feature_length
__lowerCAmelCase = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
__lowerCAmelCase = num_codevectors_per_group
__lowerCAmelCase = num_codevector_groups
__lowerCAmelCase = contrastive_logits_temperature
__lowerCAmelCase = feat_quantizer_dropout
__lowerCAmelCase = num_negatives
__lowerCAmelCase = codevector_dim
__lowerCAmelCase = proj_codevector_dim
__lowerCAmelCase = diversity_loss_weight
# ctc loss
__lowerCAmelCase = ctc_loss_reduction
__lowerCAmelCase = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__lowerCAmelCase = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__lowerCAmelCase = list(snake_case_ )
__lowerCAmelCase = list(snake_case_ )
__lowerCAmelCase = list(snake_case_ )
__lowerCAmelCase = xvector_output_dim
@property
def A__ ( self ) -> List[Any]:
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 301
|
"""simple docstring"""
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
SCREAMING_SNAKE_CASE_ = open # noqa: we just need to have a builtin inside this module to test it properly
| 301
| 1
|
"""simple docstring"""
import argparse
from collections import defaultdict
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = f"""{file}_{class_name}_{test_name}"""
done_test[_id] += 1
with open(_lowerCAmelCase , """r""" ) as f:
__lowerCAmelCase = f.readlines()
__lowerCAmelCase = f"""class {class_name}("""
__lowerCAmelCase = f"""{4 * " "}def {test_name}("""
__lowerCAmelCase = f"""{8 * " "}{correct_line.split()[0]}"""
__lowerCAmelCase = f"""{16 * " "}{correct_line.split()[0]}"""
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = 0
__lowerCAmelCase = 0
__lowerCAmelCase = []
for line in lines:
if line.startswith(_lowerCAmelCase ):
__lowerCAmelCase = True
elif in_class and line.startswith(_lowerCAmelCase ):
__lowerCAmelCase = True
elif in_class and in_func and (line.startswith(_lowerCAmelCase ) or line.startswith(_lowerCAmelCase )):
__lowerCAmelCase = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
__lowerCAmelCase = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
__lowerCAmelCase = True
if in_class and in_func and in_line and insert_line:
new_lines.append(f"""{spaces * " "}{correct_line}""" )
__lowerCAmelCase = __lowerCAmelCase = __lowerCAmelCase = __lowerCAmelCase = False
else:
new_lines.append(_lowerCAmelCase )
with open(_lowerCAmelCase , """w""" ) as f:
for line in new_lines:
f.write(_lowerCAmelCase )
def lowercase (_lowerCAmelCase , _lowerCAmelCase=None ):
if fail is not None:
with open(_lowerCAmelCase , """r""" ) as f:
__lowerCAmelCase = {l.strip() for l in f.readlines()}
else:
__lowerCAmelCase = None
with open(_lowerCAmelCase , """r""" ) as f:
__lowerCAmelCase = f.readlines()
__lowerCAmelCase = defaultdict(_lowerCAmelCase )
for line in correct_lines:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = line.split(""";""" )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
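# As the `line.split(";")` above implies, each line of the correct-lines file is expected to
# look like "path/to/test_file.py;TestClass;test_name;correct_line", and the optional failures
# file lists failing tests one per line in the "file::class::test" form used for the lookup.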
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
parser.add_argument('''--correct_filename''', help='''filename of tests with expected result''')
parser.add_argument('''--fail_filename''', help='''filename of test failures''', type=str, default=None)
SCREAMING_SNAKE_CASE_ = parser.parse_args()
main(args.correct_filename, args.fail_filename)
| 301
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
SCREAMING_SNAKE_CASE_ = {
'''vocab_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-openqa''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt'''
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-openqa''': (
'''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-reader''': (
'''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-openqa''': (
'''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-reader''': (
'''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'''
),
},
}
SCREAMING_SNAKE_CASE_ = {
'''google/realm-cc-news-pretrained-embedder''': 512,
'''google/realm-cc-news-pretrained-encoder''': 512,
'''google/realm-cc-news-pretrained-scorer''': 512,
'''google/realm-cc-news-pretrained-openqa''': 512,
'''google/realm-orqa-nq-openqa''': 512,
'''google/realm-orqa-nq-reader''': 512,
'''google/realm-orqa-wq-openqa''': 512,
'''google/realm-orqa-wq-reader''': 512,
}
SCREAMING_SNAKE_CASE_ = {
'''google/realm-cc-news-pretrained-embedder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-encoder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-scorer''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-reader''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-reader''': {'''do_lower_case''': True},
}
class lowerCAmelCase_ ( A__ ):
'''simple docstring'''
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_INIT_CONFIGURATION
_snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case = RealmTokenizer
def __init__( self , snake_case_=None , snake_case_=None , snake_case_=True , snake_case_="[UNK]" , snake_case_="[SEP]" , snake_case_="[PAD]" , snake_case_="[CLS]" , snake_case_="[MASK]" , snake_case_=True , snake_case_=None , **snake_case_ , ) -> Optional[int]:
super().__init__(
snake_case_ , tokenizer_file=snake_case_ , do_lower_case=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , pad_token=snake_case_ , cls_token=snake_case_ , mask_token=snake_case_ , tokenize_chinese_chars=snake_case_ , strip_accents=snake_case_ , **snake_case_ , )
__lowerCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , snake_case_ ) != do_lower_case
or normalizer_state.get("""strip_accents""" , snake_case_ ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , snake_case_ ) != tokenize_chinese_chars
):
__lowerCAmelCase = getattr(snake_case_ , normalizer_state.pop("""type""" ) )
__lowerCAmelCase = do_lower_case
__lowerCAmelCase = strip_accents
__lowerCAmelCase = tokenize_chinese_chars
__lowerCAmelCase = normalizer_class(**snake_case_ )
__lowerCAmelCase = do_lower_case
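    # The method below appears to be REALM's `batch_encode_candidates`: padding is forced to
    # max_length and each example's list of candidate texts is encoded together, so with
    # `return_tensors` set (and an equal number of candidates per example) the stacked outputs
    # have shape (batch_size, num_candidates, max_length).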
def A__ ( self , snake_case_ , **snake_case_ ) -> Tuple:
__lowerCAmelCase = PaddingStrategy.MAX_LENGTH
__lowerCAmelCase = text
__lowerCAmelCase = kwargs.pop("""text_pair""" , snake_case_ )
__lowerCAmelCase = kwargs.pop("""return_tensors""" , snake_case_ )
__lowerCAmelCase = {
"""input_ids""": [],
"""attention_mask""": [],
"""token_type_ids""": [],
}
for idx, candidate_text in enumerate(snake_case_ ):
if batch_text_pair is not None:
__lowerCAmelCase = batch_text_pair[idx]
else:
__lowerCAmelCase = None
__lowerCAmelCase = super().__call__(snake_case_ , snake_case_ , return_tensors=snake_case_ , **snake_case_ )
__lowerCAmelCase = encoded_candidates.get("""input_ids""" )
__lowerCAmelCase = encoded_candidates.get("""attention_mask""" )
__lowerCAmelCase = encoded_candidates.get("""token_type_ids""" )
if encoded_input_ids is not None:
output_data["input_ids"].append(snake_case_ )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(snake_case_ )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(snake_case_ )
__lowerCAmelCase = {key: item for key, item in output_data.items() if len(snake_case_ ) != 0}
return BatchEncoding(snake_case_ , tensor_type=snake_case_ )
def A__ ( self , snake_case_ , snake_case_=None ) -> Optional[int]:
__lowerCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def A__ ( self , snake_case_ , snake_case_ = None ) -> List[int]:
__lowerCAmelCase = [self.sep_token_id]
__lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def A__ ( self , snake_case_ , snake_case_ = None ) -> Tuple[str]:
__lowerCAmelCase = self._tokenizer.model.save(snake_case_ , name=snake_case_ )
return tuple(snake_case_ )
| 301
| 1
|
"""simple docstring"""
def lowercase (_lowerCAmelCase ):
__lowerCAmelCase = [[0 for _ in range(_lowerCAmelCase )] for _ in range(m + 1 )]
for i in range(m + 1 ):
__lowerCAmelCase = 1
for n in range(m + 1 ):
for k in range(1 , _lowerCAmelCase ):
memo[n][k] += memo[n][k - 1]
if n - k > 0:
memo[n][k] += memo[n - k - 1][k]
return memo[m][m - 1]
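# memo[n][k] counts the partitions of n into parts of size at most k + 1, so memo[m][m - 1]
# is p(m), the total number of partitions of m. Hand-checked example: partition(5) == 7,
# i.e. 5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1 and 1+1+1+1+1.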
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
SCREAMING_SNAKE_CASE_ = int(input('''Enter a number: ''').strip())
print(partition(n))
except ValueError:
print('''Please enter a number.''')
else:
try:
SCREAMING_SNAKE_CASE_ = int(sys.argv[1])
print(partition(n))
except ValueError:
print('''Please pass a number.''')
| 301
|
"""simple docstring"""
import math
def lowercase (_lowerCAmelCase ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All prime numbers greater than 3 are of the form 6k +/- 1
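    # (any integer is 6k + r with r in {0,...,5}; 6k, 6k+2 and 6k+4 are even and 6k+3 is
    # divisible by 3, so every prime above 3 must be of the form 6k + 1 or 6k - 1)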
for i in range(5 , int(math.sqrt(_lowerCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def lowercase (_lowerCAmelCase = 0.1 ):
__lowerCAmelCase = 3
__lowerCAmelCase = 3
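    # The inner loop below walks j*j + (j+1), j*j + 2*(j+1) and j*j + 3*(j+1), the three new
    # diagonal corners of a number spiral whose side grows from j to j + 2; the fourth corner,
    # (j + 2)**2, is a perfect square and never prime. E.g. after side 3 the corners are 13, 17, 21.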
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
primes += is_prime(_lowerCAmelCase )
j += 2
return j
if __name__ == "__main__":
import doctest
doctest.testmod()
| 301
| 1
|
"""simple docstring"""
import os
def lowercase (_lowerCAmelCase = "matrix.txt" ):
with open(os.path.join(os.path.dirname(_lowerCAmelCase ) , _lowerCAmelCase ) ) as in_file:
__lowerCAmelCase = in_file.read()
__lowerCAmelCase = [[int(_lowerCAmelCase ) for cell in row.split(""",""" )] for row in data.strip().splitlines()]
__lowerCAmelCase = [[0 for cell in row] for row in grid]
__lowerCAmelCase = len(grid[0] )
__lowerCAmelCase = [[0 for i in range(_lowerCAmelCase )] for j in range(_lowerCAmelCase )]
__lowerCAmelCase = grid[0][0]
for i in range(1 , _lowerCAmelCase ):
__lowerCAmelCase = grid[0][i] + dp[0][i - 1]
for i in range(1 , _lowerCAmelCase ):
__lowerCAmelCase = grid[i][0] + dp[i - 1][0]
for i in range(1 , _lowerCAmelCase ):
for j in range(1 , _lowerCAmelCase ):
__lowerCAmelCase = grid[i][j] + min(dp[i - 1][j] , dp[i][j - 1] )
return dp[-1][-1]
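# Hand-worked sanity check of the recurrence above: for the grid [[1, 3], [2, 4]] the two
# right/down paths cost 1 + 3 + 4 = 8 and 1 + 2 + 4 = 7, and the DP returns min(8, 7) = 7.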
if __name__ == "__main__":
print(F"{solution() = }")
| 301
|
"""simple docstring"""
import os
from distutils.util import strtobool
def lowercase (_lowerCAmelCase , _lowerCAmelCase ):
for e in env_keys:
__lowerCAmelCase = int(os.environ.get(_lowerCAmelCase , -1 ) )
if val >= 0:
return val
return default
def lowercase (_lowerCAmelCase , _lowerCAmelCase=False ):
__lowerCAmelCase = os.environ.get(_lowerCAmelCase , str(_lowerCAmelCase ) )
return strtobool(_lowerCAmelCase ) == 1 # As its name indicates `strtobool` actually returns an int...
def lowercase (_lowerCAmelCase , _lowerCAmelCase="no" ):
__lowerCAmelCase = os.environ.get(_lowerCAmelCase , str(_lowerCAmelCase ) )
return value
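# Usage sketch (the environment variable names below are invented for illustration):
# the first helper scans its env keys in order and returns the first value that parses to a
# non-negative int (e.g. FOO_NUM_PROCS=8 -> 8), the second maps "1"/"true"/"yes"/"on" to True
# via strtobool (e.g. FOO_DEBUG=1), and the third simply returns the raw string, default "no".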
| 301
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
SCREAMING_SNAKE_CASE_ = {
'''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
'''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTBigCodeForSequenceClassification''',
'''GPTBigCodeForTokenClassification''',
'''GPTBigCodeForCausalLM''',
'''GPTBigCodeModel''',
'''GPTBigCodePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 301
|
"""simple docstring"""
def lowercase (_lowerCAmelCase = 100_0000 ):
__lowerCAmelCase = [i - 1 for i in range(limit + 1 )]
for i in range(2 , limit + 1 ):
if phi[i] == i - 1:
for j in range(2 * i , limit + 1 , _lowerCAmelCase ):
phi[j] -= phi[j] // i
return sum(phi[2 : limit + 1] )
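# The sieve above turns phi[i] (initialised to i - 1) into Euler's totient of i, and the final
# sum counts the reduced proper fractions with denominator up to `limit`. Hand-checked small
# case: for limit = 8, phi[2..8] = [1, 2, 2, 4, 2, 6, 4], so the function would return 21.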
if __name__ == "__main__":
print(solution())
| 301
| 1
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class lowerCAmelCase_ ( A__ ):
'''simple docstring'''
_snake_case = 42
class lowerCAmelCase_ ( A__ , A__ ):
'''simple docstring'''
_snake_case = True
@register_to_config
def __init__( self , snake_case_ = 3 , snake_case_ = 3 , snake_case_ = ("DownEncoderBlock2D",) , snake_case_ = ("UpDecoderBlock2D",) , snake_case_ = (64,) , snake_case_ = 1 , snake_case_ = "silu" , snake_case_ = 4 , snake_case_ = 32 , snake_case_ = 32 , snake_case_ = 0.18_215 , ) -> Any:
super().__init__()
# pass init params to Encoder
__lowerCAmelCase = Encoder(
in_channels=snake_case_ , out_channels=snake_case_ , down_block_types=snake_case_ , block_out_channels=snake_case_ , layers_per_block=snake_case_ , act_fn=snake_case_ , norm_num_groups=snake_case_ , double_z=snake_case_ , )
# pass init params to Decoder
__lowerCAmelCase = Decoder(
in_channels=snake_case_ , out_channels=snake_case_ , up_block_types=snake_case_ , block_out_channels=snake_case_ , layers_per_block=snake_case_ , norm_num_groups=snake_case_ , act_fn=snake_case_ , )
__lowerCAmelCase = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 )
__lowerCAmelCase = nn.Convad(snake_case_ , snake_case_ , 1 )
__lowerCAmelCase = False
__lowerCAmelCase = False
# only relevant if vae tiling is enabled
__lowerCAmelCase = self.config.sample_size
__lowerCAmelCase = (
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple) )
else self.config.sample_size
)
__lowerCAmelCase = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
__lowerCAmelCase = 0.25
def A__ ( self , snake_case_ , snake_case_=False ) -> str:
if isinstance(snake_case_ , (Encoder, Decoder) ):
__lowerCAmelCase = value
def A__ ( self , snake_case_ = True ) -> Dict:
__lowerCAmelCase = use_tiling
def A__ ( self ) -> List[Any]:
self.enable_tiling(snake_case_ )
def A__ ( self ) -> str:
__lowerCAmelCase = True
def A__ ( self ) -> Tuple:
__lowerCAmelCase = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def A__ ( self ) -> Dict[str, AttentionProcessor]:
__lowerCAmelCase = {}
def fn_recursive_add_processors(snake_case_ , snake_case_ , snake_case_ ):
if hasattr(snake_case_ , """set_processor""" ):
__lowerCAmelCase = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"""{name}.{sub_name}""" , snake_case_ , snake_case_ )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(snake_case_ , snake_case_ , snake_case_ )
return processors
def A__ ( self , snake_case_ ) -> Dict:
__lowerCAmelCase = len(self.attn_processors.keys() )
if isinstance(snake_case_ , snake_case_ ) and len(snake_case_ ) != count:
raise ValueError(
f"""A dict of processors was passed, but the number of processors {len(snake_case_ )} does not match the"""
f""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
def fn_recursive_attn_processor(snake_case_ , snake_case_ , snake_case_ ):
if hasattr(snake_case_ , """set_processor""" ):
if not isinstance(snake_case_ , snake_case_ ):
module.set_processor(snake_case_ )
else:
module.set_processor(processor.pop(f"""{name}.processor""" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"""{name}.{sub_name}""" , snake_case_ , snake_case_ )
for name, module in self.named_children():
fn_recursive_attn_processor(snake_case_ , snake_case_ , snake_case_ )
def A__ ( self ) -> Tuple:
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def A__ ( self , snake_case_ , snake_case_ = True ) -> AutoencoderKLOutput:
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(snake_case_ , return_dict=snake_case_ )
if self.use_slicing and x.shape[0] > 1:
__lowerCAmelCase = [self.encoder(snake_case_ ) for x_slice in x.split(1 )]
__lowerCAmelCase = torch.cat(snake_case_ )
else:
__lowerCAmelCase = self.encoder(snake_case_ )
__lowerCAmelCase = self.quant_conv(snake_case_ )
__lowerCAmelCase = DiagonalGaussianDistribution(snake_case_ )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=snake_case_ )
def A__ ( self , snake_case_ , snake_case_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(snake_case_ , return_dict=snake_case_ )
__lowerCAmelCase = self.post_quant_conv(snake_case_ )
__lowerCAmelCase = self.decoder(snake_case_ )
if not return_dict:
return (dec,)
return DecoderOutput(sample=snake_case_ )
@apply_forward_hook
def A__ ( self , snake_case_ , snake_case_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_slicing and z.shape[0] > 1:
__lowerCAmelCase = [self._decode(snake_case_ ).sample for z_slice in z.split(1 )]
__lowerCAmelCase = torch.cat(snake_case_ )
else:
__lowerCAmelCase = self._decode(snake_case_ ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=snake_case_ )
def A__ ( self , snake_case_ , snake_case_ , snake_case_ ) -> Any:
__lowerCAmelCase = min(a.shape[2] , b.shape[2] , snake_case_ )
for y in range(snake_case_ ):
__lowerCAmelCase = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def A__ ( self , snake_case_ , snake_case_ , snake_case_ ) -> int:
__lowerCAmelCase = min(a.shape[3] , b.shape[3] , snake_case_ )
for x in range(snake_case_ ):
__lowerCAmelCase = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
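    # The two helpers above cross-fade the seam between neighbouring tiles: at offset y (or x)
    # inside the seam the output is (1 - y / blend_extent) * previous_tile_edge +
    # (y / blend_extent) * current_tile, which hides tile boundaries when the tiled results
    # are concatenated in the methods below.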
def A__ ( self , snake_case_ , snake_case_ = True ) -> AutoencoderKLOutput:
__lowerCAmelCase = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
__lowerCAmelCase = int(self.tile_latent_min_size * self.tile_overlap_factor )
__lowerCAmelCase = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
__lowerCAmelCase = []
for i in range(0 , x.shape[2] , snake_case_ ):
__lowerCAmelCase = []
for j in range(0 , x.shape[3] , snake_case_ ):
__lowerCAmelCase = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
__lowerCAmelCase = self.encoder(snake_case_ )
__lowerCAmelCase = self.quant_conv(snake_case_ )
row.append(snake_case_ )
rows.append(snake_case_ )
__lowerCAmelCase = []
for i, row in enumerate(snake_case_ ):
__lowerCAmelCase = []
for j, tile in enumerate(snake_case_ ):
                # blend the tile above and the tile to the left into the current tile,
                # then add the current tile to the result row
if i > 0:
__lowerCAmelCase = self.blend_v(rows[i - 1][j] , snake_case_ , snake_case_ )
if j > 0:
__lowerCAmelCase = self.blend_h(row[j - 1] , snake_case_ , snake_case_ )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(snake_case_ , dim=3 ) )
__lowerCAmelCase = torch.cat(snake_case_ , dim=2 )
__lowerCAmelCase = DiagonalGaussianDistribution(snake_case_ )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=snake_case_ )
def A__ ( self , snake_case_ , snake_case_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
__lowerCAmelCase = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
__lowerCAmelCase = int(self.tile_sample_min_size * self.tile_overlap_factor )
__lowerCAmelCase = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
__lowerCAmelCase = []
for i in range(0 , z.shape[2] , snake_case_ ):
__lowerCAmelCase = []
for j in range(0 , z.shape[3] , snake_case_ ):
__lowerCAmelCase = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
__lowerCAmelCase = self.post_quant_conv(snake_case_ )
__lowerCAmelCase = self.decoder(snake_case_ )
row.append(snake_case_ )
rows.append(snake_case_ )
__lowerCAmelCase = []
for i, row in enumerate(snake_case_ ):
__lowerCAmelCase = []
for j, tile in enumerate(snake_case_ ):
                # blend the tile above and the tile to the left into the current tile,
                # then add the current tile to the result row
if i > 0:
__lowerCAmelCase = self.blend_v(rows[i - 1][j] , snake_case_ , snake_case_ )
if j > 0:
__lowerCAmelCase = self.blend_h(row[j - 1] , snake_case_ , snake_case_ )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(snake_case_ , dim=3 ) )
__lowerCAmelCase = torch.cat(snake_case_ , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=snake_case_ )
def A__ ( self , snake_case_ , snake_case_ = False , snake_case_ = True , snake_case_ = None , ) -> Union[DecoderOutput, torch.FloatTensor]:
__lowerCAmelCase = sample
__lowerCAmelCase = self.encode(snake_case_ ).latent_dist
if sample_posterior:
__lowerCAmelCase = posterior.sample(generator=snake_case_ )
else:
__lowerCAmelCase = posterior.mode()
__lowerCAmelCase = self.decode(snake_case_ ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=snake_case_ )
| 301
|
"""simple docstring"""
from math import isqrt, loga
def lowercase (_lowerCAmelCase ):
__lowerCAmelCase = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , _lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = False
return [i for i in range(2 , _lowerCAmelCase ) if is_prime[i]]
def lowercase (_lowerCAmelCase = 80_0800 , _lowerCAmelCase = 80_0800 ):
__lowerCAmelCase = degree * loga(_lowerCAmelCase )
__lowerCAmelCase = int(_lowerCAmelCase )
__lowerCAmelCase = calculate_prime_numbers(_lowerCAmelCase )
__lowerCAmelCase = 0
__lowerCAmelCase = 0
__lowerCAmelCase = len(_lowerCAmelCase ) - 1
while left < right:
while (
prime_numbers[right] * loga(prime_numbers[left] )
+ prime_numbers[left] * loga(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
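# The comparison above works in log space: p**q * q**p <= base**degree is equivalent to
# q * loga(p) + p * loga(q) <= degree * loga(base), which is the upper_bound computed at the top.
# The two-pointer sweep over the ascending prime list then counts every valid pair with p < q.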
if __name__ == "__main__":
print(F"{solution() = }")
| 301
| 1
|
"""simple docstring"""
import os
def lowercase (_lowerCAmelCase ):
__lowerCAmelCase = len(grid[0] )
__lowerCAmelCase = len(_lowerCAmelCase )
__lowerCAmelCase = 0
__lowerCAmelCase = 0
__lowerCAmelCase = 0
# Check vertically, horizontally, diagonally at the same time (only works
# for nxn grid)
for i in range(_lowerCAmelCase ):
for j in range(n_rows - 3 ):
__lowerCAmelCase = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
__lowerCAmelCase = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
# Left-to-right diagonal (\) product
if i < n_columns - 3:
__lowerCAmelCase = (
grid[i][j]
* grid[i + 1][j + 1]
* grid[i + 2][j + 2]
* grid[i + 3][j + 3]
)
# Right-to-left diagonal(/) product
if i > 2:
__lowerCAmelCase = (
grid[i][j]
* grid[i - 1][j + 1]
* grid[i - 2][j + 2]
* grid[i - 3][j + 3]
)
__lowerCAmelCase = max(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if max_product > largest:
__lowerCAmelCase = max_product
return largest
def lowercase ():
__lowerCAmelCase = []
with open(os.path.dirname(_lowerCAmelCase ) + """/grid.txt""" ) as file:
for line in file:
grid.append(line.strip("""\n""" ).split(""" """ ) )
__lowerCAmelCase = [[int(_lowerCAmelCase ) for i in grid[j]] for j in range(len(_lowerCAmelCase ) )]
return largest_product(_lowerCAmelCase )
if __name__ == "__main__":
print(solution())
| 301
|
"""simple docstring"""
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
SCREAMING_SNAKE_CASE_ = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_=16 , snake_case_=13 , snake_case_=7 , snake_case_=14 , snake_case_=10 , snake_case_=19 , snake_case_=5 , snake_case_=4 , snake_case_=True , snake_case_=16 , snake_case_=2 , snake_case_=4 , snake_case_=4 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=[1, 2, 3, 4, 5] , snake_case_=25 , snake_case_=5 , ) -> Tuple:
__lowerCAmelCase = d_model
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = prediction_length
__lowerCAmelCase = context_length
__lowerCAmelCase = cardinality
__lowerCAmelCase = num_time_features
__lowerCAmelCase = lags_sequence
__lowerCAmelCase = embedding_dimension
__lowerCAmelCase = is_training
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = context_length
__lowerCAmelCase = prediction_length + label_length
__lowerCAmelCase = label_length
__lowerCAmelCase = moving_average
__lowerCAmelCase = autocorrelation_factor
def A__ ( self ) -> List[Any]:
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def A__ ( self , snake_case_ ) -> Any:
__lowerCAmelCase = config.context_length + max(config.lags_sequence )
__lowerCAmelCase = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
__lowerCAmelCase = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
__lowerCAmelCase = floats_tensor([self.batch_size, _past_length] )
__lowerCAmelCase = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
__lowerCAmelCase = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
__lowerCAmelCase = floats_tensor([self.batch_size, config.prediction_length] )
__lowerCAmelCase = {
"""past_values""": past_values,
"""static_categorical_features""": static_categorical_features,
"""past_time_features""": past_time_features,
"""past_observed_mask""": past_observed_mask,
"""future_time_features""": future_time_features,
"""future_values""": future_values,
}
return inputs_dict
def A__ ( self ) -> Union[str, Any]:
__lowerCAmelCase = self.get_config()
__lowerCAmelCase = self.prepare_autoformer_inputs_dict(snake_case_ )
return config, inputs_dict
def A__ ( self ) -> int:
__lowerCAmelCase , __lowerCAmelCase = self.prepare_config_and_inputs()
return config, inputs_dict
def A__ ( self , snake_case_ , snake_case_ ) -> int:
__lowerCAmelCase = AutoformerModel(config=snake_case_ ).to(snake_case_ ).eval()
__lowerCAmelCase = model(**snake_case_ )
__lowerCAmelCase = outputs.encoder_last_hidden_state
__lowerCAmelCase = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCAmelCase = model.get_encoder()
encoder.save_pretrained(snake_case_ )
__lowerCAmelCase = AutoformerEncoder.from_pretrained(snake_case_ ).to(snake_case_ )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = model.create_network_inputs(**snake_case_ )
__lowerCAmelCase , __lowerCAmelCase = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
__lowerCAmelCase = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
__lowerCAmelCase = encoder(inputs_embeds=snake_case_ )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
__lowerCAmelCase = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
__lowerCAmelCase = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
__lowerCAmelCase = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
__lowerCAmelCase = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCAmelCase = model.get_decoder()
decoder.save_pretrained(snake_case_ )
__lowerCAmelCase = AutoformerDecoder.from_pretrained(snake_case_ ).to(snake_case_ )
__lowerCAmelCase = decoder(
trend=snake_case_ , inputs_embeds=snake_case_ , encoder_hidden_states=snake_case_ , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class lowerCAmelCase_ ( A__ , A__ , unittest.TestCase ):
'''simple docstring'''
_snake_case = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
_snake_case = (AutoformerForPrediction,) if is_torch_available() else ()
_snake_case = {'''feature-extraction''': AutoformerModel} if is_torch_available() else {}
_snake_case = False
_snake_case = False
_snake_case = False
_snake_case = False
_snake_case = False
_snake_case = False
def A__ ( self ) -> Optional[int]:
__lowerCAmelCase = AutoformerModelTester(self )
__lowerCAmelCase = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ )
def A__ ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
def A__ ( self ) -> Dict:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(snake_case_ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case_ )
__lowerCAmelCase , __lowerCAmelCase = model_class.from_pretrained(snake_case_ , output_loading_info=snake_case_ )
self.assertEqual(info["""missing_keys"""] , [] )
def A__ ( self ) -> Optional[Any]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*snake_case_ )
@unittest.skip(reason="""Model has no tokens embeddings""" )
def A__ ( self ) -> Any:
pass
def A__ ( self ) -> str:
        __lowerCAmelCase = inspect.signature(getattr(AutoformerModel , """forward""" ) )
# The main input is the name of the argument after `self`
__lowerCAmelCase = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , snake_case_ )
def A__ ( self ) -> Any:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(snake_case_ )
__lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase = [*signature.parameters.keys()]
__lowerCAmelCase = [
"""past_values""",
"""past_time_features""",
"""past_observed_mask""",
"""static_categorical_features""",
"""static_real_features""",
"""future_values""",
"""future_time_features""",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("""future_observed_mask""" )
expected_arg_names.extend(
[
"""decoder_attention_mask""",
"""head_mask""",
"""decoder_head_mask""",
"""cross_attn_head_mask""",
"""encoder_outputs""",
"""past_key_values""",
"""output_hidden_states""",
"""output_attentions""",
"""use_cache""",
"""return_dict""",
] )
self.assertListEqual(arg_names[: len(snake_case_ )] , snake_case_ )
def A__ ( self ) -> Optional[int]:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = True
__lowerCAmelCase = getattr(self.model_tester , """seq_length""" , snake_case_ )
__lowerCAmelCase = getattr(self.model_tester , """decoder_seq_length""" , snake_case_ )
__lowerCAmelCase = getattr(self.model_tester , """encoder_seq_length""" , snake_case_ )
__lowerCAmelCase = getattr(self.model_tester , """d_model""" , snake_case_ )
__lowerCAmelCase = getattr(self.model_tester , """num_attention_heads""" , snake_case_ )
__lowerCAmelCase = d_model // num_attention_heads
for model_class in self.all_model_classes:
__lowerCAmelCase = True
__lowerCAmelCase = False
__lowerCAmelCase = True
__lowerCAmelCase = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
__lowerCAmelCase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__lowerCAmelCase = True
__lowerCAmelCase = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
__lowerCAmelCase = outputs.encoder_attentions
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
__lowerCAmelCase = len(snake_case_ )
__lowerCAmelCase = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(snake_case_ , snake_case_ )
# decoder attentions
__lowerCAmelCase = outputs.decoder_attentions
self.assertIsInstance(snake_case_ , (list, tuple) )
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
__lowerCAmelCase = outputs.cross_attentions
self.assertIsInstance(snake_case_ , (list, tuple) )
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
__lowerCAmelCase = True
__lowerCAmelCase = True
__lowerCAmelCase = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(out_len + 2 , len(snake_case_ ) )
__lowerCAmelCase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def A__ ( self ) -> int:
super().test_retain_grad_hidden_states_attentions()
def prepare_batch (filename="""train-batch.pt""" ):
    file = hf_hub_download(repo_id="""hf-internal-testing/tourism-monthly-batch""" , filename=filename , repo_type="""dataset""" )
    batch = torch.load(file , map_location=torch_device )
    return batch
@require_torch
@slow
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self ) -> int:
__lowerCAmelCase = AutoformerModel.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(snake_case_ )
__lowerCAmelCase = prepare_batch()
with torch.no_grad():
__lowerCAmelCase = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , future_values=batch["""future_values"""] , future_time_features=batch["""future_time_features"""] , )[0]
__lowerCAmelCase = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , snake_case_ )
__lowerCAmelCase = torch.tensor(
[[0.3_593, -1.3_398, 0.6_330], [0.2_279, 1.5_396, -0.1_792], [0.0_450, 1.3_225, -0.2_335]] , device=snake_case_ )
self.assertTrue(torch.allclose(output[0, :3, :3] , snake_case_ , atol=snake_case_ ) )
def A__ ( self ) -> List[str]:
__lowerCAmelCase = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(snake_case_ )
__lowerCAmelCase = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
__lowerCAmelCase = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , ).encoder_last_hidden_state
__lowerCAmelCase = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , snake_case_ )
__lowerCAmelCase = torch.tensor(
[[-0.0_734, -0.9_036, 0.8_358], [4.7_186, 2.4_113, 1.9_581], [1.7_953, 2.3_558, 1.2_970]] , device=snake_case_ )
self.assertTrue(torch.allclose(output[0, :3, :3] , snake_case_ , atol=snake_case_ ) )
def A__ ( self ) -> Any:
__lowerCAmelCase = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(snake_case_ )
__lowerCAmelCase = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
__lowerCAmelCase = model.generate(
static_categorical_features=batch["""static_categorical_features"""] , past_time_features=batch["""past_time_features"""] , past_values=batch["""past_values"""] , future_time_features=batch["""future_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , )
__lowerCAmelCase = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , snake_case_ )
__lowerCAmelCase = torch.tensor([3_130.6_763, 4_056.5_293, 7_053.0_786] , device=snake_case_ )
__lowerCAmelCase = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , snake_case_ , rtol=1e-1 ) )
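# The tests above feed the Autoformer encoder/decoder with a seasonal/trend split of the
# context window. Below is a minimal sketch of that style of series decomposition (trend =
# moving average, seasonal = residual), assuming a simple same-length average with edge
# padding; it is an illustration, not the library's own decomposition layer.
import torch


def decompose(series: torch.Tensor, kernel_size: int = 25):
    # series: (batch, time, features); pad the ends so the moving average keeps the length
    pad = (kernel_size - 1) // 2
    front = series[:, :1, :].repeat(1, pad, 1)
    back = series[:, -1:, :].repeat(1, kernel_size - 1 - pad, 1)
    padded = torch.cat([front, series, back], dim=1)
    trend = padded.unfold(1, kernel_size, 1).mean(dim=-1)  # moving average over time
    seasonal = series - trend
    return seasonal, trend


_x = torch.randn(2, 64, 3)
_seasonal, _trend = decompose(_x)
assert _seasonal.shape == _trend.shape == _x.shape
assert torch.allclose(_seasonal + _trend, _x, atol=1e-6)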
| 301
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
SCREAMING_SNAKE_CASE_ = {'''tokenization_byt5''': ['''ByT5Tokenizer''']}
if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
import sys
    SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()['''__file__'''], SCREAMING_SNAKE_CASE_, module_spec=__spec__)
| 301
|
"""simple docstring"""
from math import pi, sqrt
def gamma (num ):
    if num <= 0:
        raise ValueError("""math domain error""" )
    if num > 171.5:
        raise OverflowError("""math range error""" )
    elif num - int(num ) not in (0, 0.5):
        raise NotImplementedError("""num must be an integer or a half-integer""" )
    elif num == 0.5:
        return sqrt(pi )
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
def test_gamma ():
    assert gamma(0.5 ) == sqrt(pi )
    assert gamma(1 ) == 1.0
    assert gamma(2 ) == 1.0
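# A small sanity check for the recursive half-integer gamma above: it should agree with
# math.gamma on the values it supports. This block is an illustrative addition, not part
# of the original module.
import math

for _value in (0.5, 1, 2, 3.5, 6):
    assert abs(gamma(_value) - math.gamma(_value)) < 1e-9, _value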
if __name__ == "__main__":
from doctest import testmod
testmod()
SCREAMING_SNAKE_CASE_ = 1.0
while num:
SCREAMING_SNAKE_CASE_ = float(input('''Gamma of: '''))
print(F"gamma({num}) = {gamma(num)}")
print('''\nEnter 0 to exit...''')
| 301
| 1
|
"""simple docstring"""
import math
def malus_law (initial_intensity , angle ):
    if initial_intensity < 0:
        raise ValueError("""The value of intensity cannot be negative""" )
        # handling of negative values of initial intensity
    if angle < 0 or angle > 360:
        raise ValueError("""In Malus Law, the angle is in the range 0-360 degrees""" )
        # handling of values out of allowed range
    return initial_intensity * (math.cos(math.radians(angle ) ) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name='''malus_law''')
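# A short usage sketch for the Malus-law helper above (added for illustration): the
# transmitted intensity falls off as cos^2 of the polarizer angle, so 0 degrees keeps
# everything, 60 degrees keeps a quarter, and 90 degrees blocks the beam.
assert malus_law(100.0, 0) == 100.0
assert abs(malus_law(100.0, 60) - 25.0) < 1e-9
assert abs(malus_law(100.0, 90)) < 1e-9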
| 301
|
"""simple docstring"""
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
def lowercase ():
# Get the sagemaker specific mp parameters from smp_options variable.
__lowerCAmelCase = os.getenv("""SM_HP_MP_PARAMETERS""" , """{}""" )
try:
# Parse it and check the field "partitions" is included, it is required for model parallel.
__lowerCAmelCase = json.loads(_lowerCAmelCase )
if "partitions" not in smp_options:
return False
except json.JSONDecodeError:
return False
# Get the sagemaker specific framework parameters from mpi_options variable.
__lowerCAmelCase = os.getenv("""SM_FRAMEWORK_PARAMS""" , """{}""" )
try:
# Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
__lowerCAmelCase = json.loads(_lowerCAmelCase )
        if not mpi_options.get("""sagemaker_mpi_enabled""" , False ):
return False
except json.JSONDecodeError:
return False
# Lastly, check if the `smdistributed` module is present.
return importlib.util.find_spec("""smdistributed""" ) is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class lowerCAmelCase_ ( A__ ):
'''simple docstring'''
_snake_case = field(
default='''''' , metadata={'''help''': '''Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer'''} , )
def A__ ( self ) -> Tuple:
super().__post_init__()
        warnings.warn(
            """`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use """
            """`TrainingArguments` instead.""" , FutureWarning , )
@cached_property
def A__ ( self ) -> "torch.device":
logger.info("""PyTorch: setting up devices""" )
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
"""torch.distributed process group is initialized, but local_rank == -1. """
"""In order to use Torch DDP, launch your script with `python -m torch.distributed.launch""" )
if self.no_cuda:
__lowerCAmelCase = torch.device("""cpu""" )
__lowerCAmelCase = 0
elif is_sagemaker_model_parallel_available():
__lowerCAmelCase = smp.local_rank()
__lowerCAmelCase = torch.device("""cuda""" , snake_case_ )
__lowerCAmelCase = 1
elif is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
torch.distributed.init_process_group(backend="""smddp""" , timeout=self.ddp_timeout_delta )
__lowerCAmelCase = int(os.getenv("""SMDATAPARALLEL_LOCAL_RANK""" ) )
__lowerCAmelCase = torch.device("""cuda""" , self.local_rank )
__lowerCAmelCase = 1
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
__lowerCAmelCase = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
# Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
# the default value.
__lowerCAmelCase = torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend="""nccl""" , timeout=self.ddp_timeout_delta )
__lowerCAmelCase = torch.device("""cuda""" , self.local_rank )
__lowerCAmelCase = 1
if device.type == "cuda":
            torch.cuda.set_device(device )
return device
@property
def A__ ( self ) -> Dict:
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
def A__ ( self ) -> Optional[int]:
return not is_sagemaker_model_parallel_available()
@property
def A__ ( self ) -> Tuple:
return False
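# The cached device-setup property above picks a device in a fixed precedence order. The
# helper below is a stand-alone sketch of that ordering only (the function name and the
# simplified string return values are assumptions for illustration; the real property also
# initializes process groups and sets the GPU count).
def pick_device_kind(no_cuda, model_parallel, sm_ddp, local_rank, cuda_available):
    if no_cuda:
        return "cpu"
    if model_parallel:
        return "cuda (smp local rank)"
    if sm_ddp:
        return "cuda (smddp local rank)"
    if local_rank == -1:
        return "cuda:0" if cuda_available else "cpu"
    return f"cuda:{local_rank} (torch.distributed)"


assert pick_device_kind(True, False, False, -1, True) == "cpu"
assert pick_device_kind(False, False, False, -1, True) == "cuda:0"
assert pick_device_kind(False, False, False, 2, True) == "cuda:2 (torch.distributed)"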
| 301
| 1
|
"""simple docstring"""
def solution (pence = 200 ):
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin , pence + 1 , 1 ):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 73_682
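# A hand-checkable version of the same bottom-up count (added for illustration): with
# coins [1, 2, 5] there are exactly four ways to make 5 pence
# (1+1+1+1+1, 1+1+1+2, 1+2+2, 5).
def count_ways(coins, target):
    ways = [0] * (target + 1)
    ways[0] = 1
    for coin in coins:
        for amount in range(coin, target + 1):
            ways[amount] += ways[amount - coin]
    return ways[target]


assert count_ways([1, 2, 5], 5) == 4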
| 301
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE_ = {
'''configuration_roberta''': ['''ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RobertaConfig''', '''RobertaOnnxConfig'''],
'''tokenization_roberta''': ['''RobertaTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = ['''RobertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
'''ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RobertaForCausalLM''',
'''RobertaForMaskedLM''',
'''RobertaForMultipleChoice''',
'''RobertaForQuestionAnswering''',
'''RobertaForSequenceClassification''',
'''RobertaForTokenClassification''',
'''RobertaModel''',
'''RobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
'''TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRobertaForCausalLM''',
'''TFRobertaForMaskedLM''',
'''TFRobertaForMultipleChoice''',
'''TFRobertaForQuestionAnswering''',
'''TFRobertaForSequenceClassification''',
'''TFRobertaForTokenClassification''',
'''TFRobertaMainLayer''',
'''TFRobertaModel''',
'''TFRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
'''FlaxRobertaForCausalLM''',
'''FlaxRobertaForMaskedLM''',
'''FlaxRobertaForMultipleChoice''',
'''FlaxRobertaForQuestionAnswering''',
'''FlaxRobertaForSequenceClassification''',
'''FlaxRobertaForTokenClassification''',
'''FlaxRobertaModel''',
'''FlaxRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
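# The init file above defers heavy imports through a lazy module object. Below is a minimal
# sketch of the same idea using a module-level __getattr__ (PEP 562); it illustrates the
# pattern only and is not how transformers' _LazyModule is actually implemented.
import importlib

_lazy_imports = {"RobertaModel": "transformers.models.roberta.modeling_roberta"}


def __getattr__(name):
    if name in _lazy_imports:
        # the submodule is only imported the first time the attribute is requested
        return getattr(importlib.import_module(_lazy_imports[name]), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")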
| 301
| 1
|
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
'''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
'''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
'''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
'''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
'''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
'''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
'''self_attn.rotary_emb''': '''encoder.embed_positions''',
'''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
'''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
'''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
'''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
'''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
'''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
'''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
'''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
'''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
'''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
'''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
'''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
SCREAMING_SNAKE_CASE_ = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
for attribute in key.split(""".""" ):
__lowerCAmelCase = getattr(_lowerCAmelCase , _lowerCAmelCase )
if weight_type is not None:
__lowerCAmelCase = getattr(_lowerCAmelCase , _lowerCAmelCase ).shape
else:
__lowerCAmelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
__lowerCAmelCase = value
elif weight_type == "weight_g":
__lowerCAmelCase = value
elif weight_type == "weight_v":
__lowerCAmelCase = value
elif weight_type == "bias":
__lowerCAmelCase = value
elif weight_type == "running_mean":
__lowerCAmelCase = value
elif weight_type == "running_var":
__lowerCAmelCase = value
elif weight_type == "num_batches_tracked":
__lowerCAmelCase = value
elif weight_type == "inv_freq":
__lowerCAmelCase = value
else:
__lowerCAmelCase = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = []
__lowerCAmelCase = fairseq_model.state_dict()
__lowerCAmelCase = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
__lowerCAmelCase = False
if "conv_layers" in name:
load_conv_layer(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , hf_model.config.feat_extract_norm == """group""" , )
__lowerCAmelCase = True
else:
for key, mapped_key in MAPPING.items():
__lowerCAmelCase = """wav2vec2_conformer.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
__lowerCAmelCase = True
if "*" in mapped_key:
__lowerCAmelCase = name.split(_lowerCAmelCase )[0].split(""".""" )[-2]
__lowerCAmelCase = mapped_key.replace("""*""" , _lowerCAmelCase )
if "pos_bias_u" in name:
__lowerCAmelCase = None
elif "pos_bias_v" in name:
__lowerCAmelCase = None
elif "weight_g" in name:
__lowerCAmelCase = """weight_g"""
elif "weight_v" in name:
__lowerCAmelCase = """weight_v"""
elif "bias" in name:
__lowerCAmelCase = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__lowerCAmelCase = """weight"""
elif "running_mean" in name:
__lowerCAmelCase = """running_mean"""
elif "inv_freq" in name:
__lowerCAmelCase = """inv_freq"""
elif "running_var" in name:
__lowerCAmelCase = """running_var"""
elif "num_batches_tracked" in name:
__lowerCAmelCase = """num_batches_tracked"""
else:
__lowerCAmelCase = None
set_recursively(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
continue
if not is_used:
unused_weights.append(_lowerCAmelCase )
logger.warning(f"""Unused weights: {unused_weights}""" )
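# The MAPPING table above uses "*" as a layer-index wildcard. The helper below is a small
# stand-alone sketch of how such a fairseq key can be rewritten into the HF-style name
# (the helper name and the example key are illustrative assumptions, not part of the
# conversion script itself).
def rename_with_wildcard(fairseq_key, prefix, mapped_key):
    layer_index = fairseq_key.split(".")[2]  # e.g. "encoder.layers.3...." -> "3"
    return prefix + mapped_key.replace("*", layer_index)


assert (
    rename_with_wildcard(
        "encoder.layers.3.self_attn.linear_q.weight",
        "wav2vec2_conformer.",
        "encoder.layers.*.self_attn.linear_q",
    )
    == "wav2vec2_conformer.encoder.layers.3.self_attn.linear_q"
)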
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = full_name.split("""conv_layers.""" )[-1]
__lowerCAmelCase = name.split(""".""" )
__lowerCAmelCase = int(items[0] )
__lowerCAmelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
__lowerCAmelCase = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
__lowerCAmelCase = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
__lowerCAmelCase = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
__lowerCAmelCase = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_lowerCAmelCase )
@torch.no_grad()
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=True ):
if config_path is not None:
__lowerCAmelCase = WavaVecaConformerConfig.from_pretrained(_lowerCAmelCase , hidden_act="""swish""" )
else:
__lowerCAmelCase = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
__lowerCAmelCase = """rotary"""
if is_finetuned:
if dict_path:
__lowerCAmelCase = Dictionary.load(_lowerCAmelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__lowerCAmelCase = target_dict.pad_index
__lowerCAmelCase = target_dict.bos_index
__lowerCAmelCase = target_dict.eos_index
__lowerCAmelCase = len(target_dict.symbols )
__lowerCAmelCase = os.path.join(_lowerCAmelCase , """vocab.json""" )
if not os.path.isdir(_lowerCAmelCase ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(_lowerCAmelCase ) )
return
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
__lowerCAmelCase = target_dict.indices
# fairseq has the <pad> and <s> switched
__lowerCAmelCase = 0
__lowerCAmelCase = 1
with open(_lowerCAmelCase , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
__lowerCAmelCase = WavaVecaCTCTokenizer(
_lowerCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=_lowerCAmelCase , )
__lowerCAmelCase = True if config.feat_extract_norm == """layer""" else False
__lowerCAmelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , )
__lowerCAmelCase = WavaVecaProcessor(feature_extractor=_lowerCAmelCase , tokenizer=_lowerCAmelCase )
processor.save_pretrained(_lowerCAmelCase )
__lowerCAmelCase = WavaVecaConformerForCTC(_lowerCAmelCase )
else:
__lowerCAmelCase = WavaVecaConformerForPreTraining(_lowerCAmelCase )
if is_finetuned:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
__lowerCAmelCase = argparse.Namespace(task="""audio_pretraining""" )
__lowerCAmelCase = fairseq.tasks.setup_task(_lowerCAmelCase )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_lowerCAmelCase )
__lowerCAmelCase = model[0].eval()
recursively_load_weights(_lowerCAmelCase , _lowerCAmelCase , not is_finetuned )
hf_wavavec.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
SCREAMING_SNAKE_CASE_ = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 301
|
"""simple docstring"""
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
SCREAMING_SNAKE_CASE_ = pytest.mark.integration
@pytest.mark.parametrize("""path""" , ["""paws""", """csv"""] )
def lowercase (_lowerCAmelCase , _lowerCAmelCase ):
inspect_dataset(_lowerCAmelCase , _lowerCAmelCase )
__lowerCAmelCase = path + """.py"""
assert script_name in os.listdir(_lowerCAmelCase )
assert "__pycache__" not in os.listdir(_lowerCAmelCase )
@pytest.mark.filterwarnings("""ignore:inspect_metric is deprecated:FutureWarning""" )
@pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" )
@pytest.mark.parametrize("""path""" , ["""accuracy"""] )
def lowercase (_lowerCAmelCase , _lowerCAmelCase ):
inspect_metric(_lowerCAmelCase , _lowerCAmelCase )
__lowerCAmelCase = path + """.py"""
assert script_name in os.listdir(_lowerCAmelCase )
assert "__pycache__" not in os.listdir(_lowerCAmelCase )
@pytest.mark.parametrize(
"""path, config_name, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = get_dataset_config_info(_lowerCAmelCase , config_name=_lowerCAmelCase )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
with pytest.raises(_lowerCAmelCase ):
get_dataset_config_info(_lowerCAmelCase , config_name=_lowerCAmelCase )
@pytest.mark.parametrize(
"""path, expected""" , [
("""squad""", """plain_text"""),
("""acronym_identification""", """default"""),
("""lhoestq/squad""", """plain_text"""),
("""lhoestq/test""", """default"""),
("""lhoestq/demo1""", """lhoestq--demo1"""),
("""dalle-mini/wit""", """dalle-mini--wit"""),
] , )
def lowercase (_lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = get_dataset_config_names(_lowerCAmelCase )
assert expected in config_names
@pytest.mark.parametrize(
"""path, expected_configs, expected_splits_in_first_config""" , [
("""squad""", ["""plain_text"""], ["""train""", """validation"""]),
("""dalle-mini/wit""", ["""dalle-mini--wit"""], ["""train"""]),
("""paws""", ["""labeled_final""", """labeled_swap""", """unlabeled_final"""], ["""train""", """test""", """validation"""]),
] , )
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = get_dataset_infos(_lowerCAmelCase )
assert list(infos.keys() ) == expected_configs
__lowerCAmelCase = expected_configs[0]
assert expected_config in infos
__lowerCAmelCase = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
"""path, expected_config, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = get_dataset_infos(_lowerCAmelCase )
assert expected_config in infos
__lowerCAmelCase = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
with pytest.raises(_lowerCAmelCase ):
get_dataset_split_names(_lowerCAmelCase , config_name=_lowerCAmelCase )
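# A short usage sketch for the helpers exercised above; the expected values mirror the
# parametrized cases in this file and may change with the hub copies of the datasets.
if __name__ == "__main__":
    from datasets import get_dataset_config_names, get_dataset_split_names

    print(get_dataset_config_names("squad"))  # ["plain_text"] in the cases above
    print(get_dataset_split_names("squad", "plain_text"))  # ["train", "validation"]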
| 301
| 1
|
"""simple docstring"""
def twos_complement (number ):
    if number > 0:
        raise ValueError("""input must be a negative integer""" )
    binary_number_length = len(bin(number )[3:] )
    twos_complement_number = bin(abs(number ) - (1 << binary_number_length) )[3:]
    twos_complement_number = (
        (
            """1"""
            + """0""" * (binary_number_length - len(twos_complement_number ))
            + twos_complement_number
        )
        if number < 0
        else """0"""
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
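# A worked example for the conversion above (added for illustration): -5 needs four bits,
# and its two's-complement pattern can be cross-checked by masking with 0b1111.
assert twos_complement(-5) == "0b1011"
assert format(-5 & 0b1111, "b") == "1011"
assert twos_complement(0) == "0b0"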
| 301
|
"""simple docstring"""
def solution (max_n = 100_0000 ):
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for inputa in range(2 , max_n ):
        counter = 0
        number = inputa
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if inputa not in counters:
            counters[inputa] = counter
        if counter > pre_counter:
            largest_number = inputa
            pre_counter = counter
    return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
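# A hand-checkable example of the chains being counted above (added for illustration):
# starting from 6 the sequence 6 -> 3 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1 has 9 terms,
# and the well-known chain from 27 has 112 terms.
def collatz_chain_length(start):
    length = 1
    while start != 1:
        start = start // 2 if start % 2 == 0 else 3 * start + 1
        length += 1
    return length


assert collatz_chain_length(6) == 9
assert collatz_chain_length(27) == 112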
| 301
| 1
|
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'''BridgeTower/bridgetower-base''': '''https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json''',
'''BridgeTower/bridgetower-base-itm-mlm''': (
'''https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json'''
),
}
class lowerCAmelCase_ ( A__ ):
'''simple docstring'''
_snake_case = '''bridgetower_vision_model'''
def __init__( self , snake_case_=768 , snake_case_=12 , snake_case_=3 , snake_case_=16 , snake_case_=288 , snake_case_=1 , snake_case_=1e-0_5 , snake_case_=False , snake_case_=True , snake_case_=False , **snake_case_ , ) -> Dict:
super().__init__(**snake_case_ )
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_channels
__lowerCAmelCase = patch_size
__lowerCAmelCase = image_size
__lowerCAmelCase = initializer_factor
__lowerCAmelCase = layer_norm_eps
__lowerCAmelCase = stop_gradient
__lowerCAmelCase = share_layernorm
__lowerCAmelCase = remove_last_layer
@classmethod
def A__ ( cls , snake_case_ , **snake_case_ ) -> "PretrainedConfig":
__lowerCAmelCase , __lowerCAmelCase = cls.get_config_dict(snake_case_ , **snake_case_ )
if config_dict.get("""model_type""" ) == "bridgetower":
            __lowerCAmelCase = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(snake_case_ , **snake_case_ )
class lowerCAmelCase_ ( A__ ):
'''simple docstring'''
_snake_case = '''bridgetower_text_model'''
def __init__( self , snake_case_=50_265 , snake_case_=768 , snake_case_=12 , snake_case_=12 , snake_case_=1 , snake_case_=3_072 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=514 , snake_case_=1 , snake_case_=1e-0_5 , snake_case_=1 , snake_case_=0 , snake_case_=2 , snake_case_="absolute" , snake_case_=True , **snake_case_ , ) -> Optional[Any]:
super().__init__(**snake_case_ )
__lowerCAmelCase = vocab_size
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = hidden_act
__lowerCAmelCase = initializer_factor
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = type_vocab_size
__lowerCAmelCase = layer_norm_eps
__lowerCAmelCase = position_embedding_type
__lowerCAmelCase = use_cache
__lowerCAmelCase = pad_token_id
__lowerCAmelCase = bos_token_id
__lowerCAmelCase = eos_token_id
@classmethod
def A__ ( cls , snake_case_ , **snake_case_ ) -> "PretrainedConfig":
__lowerCAmelCase , __lowerCAmelCase = cls.get_config_dict(snake_case_ , **snake_case_ )
if config_dict.get("""model_type""" ) == "bridgetower":
__lowerCAmelCase = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(snake_case_ , **snake_case_ )
class lowerCAmelCase_ ( A__ ):
'''simple docstring'''
_snake_case = '''bridgetower'''
def __init__( self , snake_case_=True , snake_case_="gelu" , snake_case_=768 , snake_case_=1 , snake_case_=1e-0_5 , snake_case_=False , snake_case_="add" , snake_case_=12 , snake_case_=6 , snake_case_=False , snake_case_=False , snake_case_=None , snake_case_=None , **snake_case_ , ) -> Union[str, Any]:
# TODO: remove this once the Hub files are updated.
__lowerCAmelCase = kwargs.pop("""text_config_dict""" , snake_case_ )
__lowerCAmelCase = kwargs.pop("""vision_config_dict""" , snake_case_ )
super().__init__(**snake_case_ )
__lowerCAmelCase = share_cross_modal_transformer_layers
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_size
__lowerCAmelCase = initializer_factor
__lowerCAmelCase = layer_norm_eps
__lowerCAmelCase = share_link_tower_layers
__lowerCAmelCase = link_tower_type
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = tie_word_embeddings
__lowerCAmelCase = init_layernorm_from_vision_encoder
if text_config is None:
__lowerCAmelCase = {}
logger.info("""`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.""" )
if vision_config is None:
__lowerCAmelCase = {}
logger.info("""`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.""" )
__lowerCAmelCase = BridgeTowerTextConfig(**snake_case_ )
__lowerCAmelCase = BridgeTowerVisionConfig(**snake_case_ )
@classmethod
def A__ ( cls , snake_case_ , snake_case_ , **snake_case_ ) -> int:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **snake_case_ )
def A__ ( self ) -> int:
__lowerCAmelCase = copy.deepcopy(self.__dict__ )
__lowerCAmelCase = self.text_config.to_dict()
__lowerCAmelCase = self.vision_config.to_dict()
__lowerCAmelCase = self.__class__.model_type
return output
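# A brief usage sketch of the composite-config pattern above, assuming the released
# BridgeTower classes that this file mirrors (illustrative only): the top-level config
# nests both sub-configs and serializes them back out through to_dict().
from transformers.models.bridgetower.configuration_bridgetower import BridgeTowerConfig

_cfg = BridgeTowerConfig()
_d = _cfg.to_dict()
assert "text_config" in _d and "vision_config" in _d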
| 301
|
"""simple docstring"""
import sys
import turtle
def get_mid (pa , pb ):
    return (pa[0] + pb[0]) / 2, (pa[1] + pb[1]) / 2
def triangle (vertexa , vertexb , vertexc , depth , ):
    my_pen.up()
    my_pen.goto(vertexa[0] , vertexa[1] )
    my_pen.down()
    my_pen.goto(vertexb[0] , vertexb[1] )
    my_pen.goto(vertexc[0] , vertexc[1] )
    my_pen.goto(vertexa[0] , vertexa[1] )
    if depth == 0:
        return
    triangle(vertexa , get_mid(vertexa , vertexb ) , get_mid(vertexa , vertexc ) , depth - 1 )
    triangle(vertexb , get_mid(vertexa , vertexb ) , get_mid(vertexb , vertexc ) , depth - 1 )
    triangle(vertexc , get_mid(vertexb , vertexc ) , get_mid(vertexa , vertexc ) , depth - 1 )
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'''Correct format for using this script: '''
'''python fractals.py <int:depth_for_fractal>'''
)
    my_pen = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor('''red''')
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
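# A small, turtle-free sketch of the same midpoint recursion (added for illustration):
# each call splits a triangle into three half-size corner triangles, so depth d ends up
# drawing 3**d smallest triangles.
def count_triangles(depth):
    return 1 if depth == 0 else 3 * count_triangles(depth - 1)


assert count_triangles(0) == 1
assert count_triangles(4) == 81
assert get_mid((0, 0), (10, 4)) == (5.0, 2.0)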
| 301
| 1
|
"""simple docstring"""
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
SCREAMING_SNAKE_CASE_ = 8
def lowercase (_lowerCAmelCase , _lowerCAmelCase=BITS ):
__lowerCAmelCase = x.device
__lowerCAmelCase = (x * 255).int().clamp(0 , 255 )
__lowerCAmelCase = 2 ** torch.arange(bits - 1 , -1 , -1 , device=_lowerCAmelCase )
__lowerCAmelCase = rearrange(_lowerCAmelCase , """d -> d 1 1""" )
__lowerCAmelCase = rearrange(_lowerCAmelCase , """b c h w -> b c 1 h w""" )
__lowerCAmelCase = ((x & mask) != 0).float()
__lowerCAmelCase = rearrange(_lowerCAmelCase , """b c d h w -> b (c d) h w""" )
__lowerCAmelCase = bits * 2 - 1
return bits
def lowercase (_lowerCAmelCase , _lowerCAmelCase=BITS ):
__lowerCAmelCase = x.device
__lowerCAmelCase = (x > 0).int()
__lowerCAmelCase = 2 ** torch.arange(bits - 1 , -1 , -1 , device=_lowerCAmelCase , dtype=torch.intaa )
__lowerCAmelCase = rearrange(_lowerCAmelCase , """d -> d 1 1""" )
__lowerCAmelCase = rearrange(_lowerCAmelCase , """b (c d) h w -> b c d h w""" , d=8 )
__lowerCAmelCase = reduce(x * mask , """b c d h w -> b c h w""" , """sum""" )
return (dec / 255).clamp(0.0 , 1.0 )
def lowercase (self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 0.0 , _lowerCAmelCase = True , _lowerCAmelCase=None , _lowerCAmelCase = True , ):
if self.num_inference_steps is None:
raise ValueError(
"""Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler""" )
# See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
# Ideally, read DDIM paper in-detail understanding
# Notation (<variable name> -> <name in paper>
# - pred_noise_t -> e_theta(x_t, t)
# - pred_original_sample -> f_theta(x_t, t) or x_0
# - std_dev_t -> sigma_t
# - eta -> η
# - pred_sample_direction -> "direction pointing to x_t"
# - pred_prev_sample -> "x_t-1"
# 1. get previous step value (=t-1)
__lowerCAmelCase = timestep - self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
__lowerCAmelCase = self.alphas_cumprod[timestep]
__lowerCAmelCase = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
__lowerCAmelCase = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__lowerCAmelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
# 4. Clip "predicted x_0"
__lowerCAmelCase = self.bit_scale
if self.config.clip_sample:
__lowerCAmelCase = torch.clamp(_lowerCAmelCase , -scale , _lowerCAmelCase )
# 5. compute variance: "sigma_t(η)" -> see formula (16)
# σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
__lowerCAmelCase = self._get_variance(_lowerCAmelCase , _lowerCAmelCase )
__lowerCAmelCase = eta * variance ** 0.5
if use_clipped_model_output:
# the model_output is always re-derived from the clipped x_0 in Glide
__lowerCAmelCase = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
# 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__lowerCAmelCase = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
# 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__lowerCAmelCase = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if eta > 0:
# randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
__lowerCAmelCase = model_output.device if torch.is_tensor(_lowerCAmelCase ) else """cpu"""
__lowerCAmelCase = torch.randn(model_output.shape , dtype=model_output.dtype , generator=_lowerCAmelCase ).to(_lowerCAmelCase )
__lowerCAmelCase = self._get_variance(_lowerCAmelCase , _lowerCAmelCase ) ** 0.5 * eta * noise
__lowerCAmelCase = prev_sample + variance
if not return_dict:
return (prev_sample,)
return DDIMSchedulerOutput(prev_sample=_lowerCAmelCase , pred_original_sample=_lowerCAmelCase )
def lowercase (self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase="epsilon" , _lowerCAmelCase=None , _lowerCAmelCase = True , ):
__lowerCAmelCase = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
__lowerCAmelCase , __lowerCAmelCase = torch.split(_lowerCAmelCase , sample.shape[1] , dim=1 )
else:
__lowerCAmelCase = None
# 1. compute alphas, betas
__lowerCAmelCase = self.alphas_cumprod[t]
__lowerCAmelCase = self.alphas_cumprod[t - 1] if t > 0 else self.one
__lowerCAmelCase = 1 - alpha_prod_t
__lowerCAmelCase = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if prediction_type == "epsilon":
__lowerCAmelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif prediction_type == "sample":
__lowerCAmelCase = model_output
else:
raise ValueError(f"""Unsupported prediction_type {prediction_type}.""" )
# 3. Clip "predicted x_0"
__lowerCAmelCase = self.bit_scale
if self.config.clip_sample:
__lowerCAmelCase = torch.clamp(_lowerCAmelCase , -scale , _lowerCAmelCase )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__lowerCAmelCase = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
__lowerCAmelCase = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__lowerCAmelCase = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
__lowerCAmelCase = 0
if t > 0:
__lowerCAmelCase = torch.randn(
model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=_lowerCAmelCase ).to(model_output.device )
__lowerCAmelCase = (self._get_variance(_lowerCAmelCase , predicted_variance=_lowerCAmelCase ) ** 0.5) * noise
__lowerCAmelCase = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return DDPMSchedulerOutput(prev_sample=_lowerCAmelCase , pred_original_sample=_lowerCAmelCase )
class lowerCAmelCase_ ( A__ ):
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_ , snake_case_ = 1.0 , ) -> Tuple:
super().__init__()
__lowerCAmelCase = bit_scale
__lowerCAmelCase = (
ddim_bit_scheduler_step if isinstance(snake_case_ , snake_case_ ) else ddpm_bit_scheduler_step
)
self.register_modules(unet=snake_case_ , scheduler=snake_case_ )
@torch.no_grad()
def __call__( self , snake_case_ = 256 , snake_case_ = 256 , snake_case_ = 50 , snake_case_ = None , snake_case_ = 1 , snake_case_ = "pil" , snake_case_ = True , **snake_case_ , ) -> Union[Tuple, ImagePipelineOutput]:
__lowerCAmelCase = torch.randn(
(batch_size, self.unet.config.in_channels, height, width) , generator=snake_case_ , )
__lowerCAmelCase = decimal_to_bits(snake_case_ ) * self.bit_scale
__lowerCAmelCase = latents.to(self.device )
self.scheduler.set_timesteps(snake_case_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# predict the noise residual
__lowerCAmelCase = self.unet(snake_case_ , snake_case_ ).sample
# compute the previous noisy sample x_t -> x_t-1
__lowerCAmelCase = self.scheduler.step(snake_case_ , snake_case_ , snake_case_ ).prev_sample
__lowerCAmelCase = bits_to_decimal(snake_case_ )
if output_type == "pil":
__lowerCAmelCase = self.numpy_to_pil(snake_case_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=snake_case_ )
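# The pipeline above works in bit space: images in [0, 1] are quantized to 8 bits per
# channel and mapped to {-1, +1}. Below is a minimal stand-alone round-trip sketch of that
# encoding in plain torch (no einops), added for illustration; it is not the pipeline's own
# implementation.
def to_bits(x, bits=8):
    x = (x * 255).int().clamp(0, 255)  # (b, c, h, w) integer pixel values
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=x.device)  # MSB first
    b = ((x.unsqueeze(2) & mask.view(1, 1, bits, 1, 1)) != 0).float()
    return b.flatten(1, 2) * 2 - 1  # (b, c * bits, h, w) in {-1, +1}


def from_bits(b, bits=8):
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=b.device, dtype=torch.int32)
    b = (b > 0).int().unflatten(1, (-1, bits))  # (b, c, bits, h, w)
    dec = (b * mask.view(1, 1, bits, 1, 1)).sum(dim=2)
    return (dec / 255).clamp(0.0, 1.0)


_image = torch.rand(1, 3, 8, 8)
_restored = from_bits(to_bits(_image))
assert torch.allclose(_restored, (_image * 255).int().float() / 255, atol=1e-6)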
| 301
|
"""simple docstring"""
def partition (m ):
    memo = [[0 for _ in range(m )] for _ in range(m + 1 )]
    for i in range(m + 1 ):
        memo[i][0] = 1
    for n in range(m + 1 ):
        for k in range(1 , m ):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
SCREAMING_SNAKE_CASE_ = int(input('''Enter a number: ''').strip())
print(partition(n))
except ValueError:
print('''Please enter a number.''')
else:
try:
SCREAMING_SNAKE_CASE_ = int(sys.argv[1])
print(partition(n))
except ValueError:
print('''Please pass a number.''')
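# A hand-checkable case for the table above (added for illustration): 5 has seven
# partitions (5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1, 1+1+1+1+1), so partition(5) should
# return 7.
assert partition(5) == 7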
| 301
| 1
|
"""simple docstring"""
def hex_to_bin (hex_num ):
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("""No value was passed to the function""" )
    is_negative = hex_num[0] == """-"""
    if is_negative:
        hex_num = hex_num[1:]
    try:
        int_num = int(hex_num , 16 )
    except ValueError:
        raise ValueError("""Invalid value was passed to the function""" )
    bin_str = """"""
    while int_num > 0:
        bin_str = str(int_num % 2 ) + bin_str
        int_num >>= 1
    return int(("""-""" + bin_str) if is_negative else bin_str )
if __name__ == "__main__":
import doctest
doctest.testmod()
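# A worked example for the converter above (added for illustration): hex "AC" is decimal
# 172, whose binary digits are 10101100; the function returns that digit pattern as an
# int, carrying the sign through for negative inputs.
assert hex_to_bin("AC") == 10101100
assert hex_to_bin("-ac") == -10101100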
| 301
|
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
SCREAMING_SNAKE_CASE_ = '''bart'''
SCREAMING_SNAKE_CASE_ = True
@st.cache(allow_output_mutation=_lowerCAmelCase )
def lowercase ():
if LOAD_DENSE_INDEX:
__lowerCAmelCase = AutoTokenizer.from_pretrained("""yjernite/retribert-base-uncased""" )
__lowerCAmelCase = AutoModel.from_pretrained("""yjernite/retribert-base-uncased""" ).to("""cuda:0""" )
__lowerCAmelCase = qar_model.eval()
else:
__lowerCAmelCase , __lowerCAmelCase = (None, None)
if MODEL_TYPE == "bart":
__lowerCAmelCase = AutoTokenizer.from_pretrained("""yjernite/bart_eli5""" )
__lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained("""yjernite/bart_eli5""" ).to("""cuda:0""" )
__lowerCAmelCase = torch.load("""seq2seq_models/eli5_bart_model_blm_2.pth""" )
sas_model.load_state_dict(save_dict["""model"""] )
__lowerCAmelCase = sas_model.eval()
else:
__lowerCAmelCase , __lowerCAmelCase = make_qa_sas_model(
model_name="""t5-small""" , from_file="""seq2seq_models/eli5_t5_model_1024_4.pth""" , device="""cuda:0""" )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=_lowerCAmelCase )
def lowercase ():
if LOAD_DENSE_INDEX:
__lowerCAmelCase = faiss.StandardGpuResources()
__lowerCAmelCase = datasets.load_dataset(path="""wiki_snippets""" , name="""wiki40b_en_100_0""" )["""train"""]
__lowerCAmelCase = np.memmap(
"""wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat""" , dtype="""float32""" , mode="""r""" , shape=(wikiaab_passages.num_rows, 128) , )
__lowerCAmelCase = faiss.IndexFlatIP(128 )
__lowerCAmelCase = faiss.index_cpu_to_gpu(_lowerCAmelCase , 1 , _lowerCAmelCase )
wikiaab_gpu_index_flat.add(_lowerCAmelCase ) # TODO fix for larger GPU
else:
__lowerCAmelCase , __lowerCAmelCase = (None, None)
__lowerCAmelCase = Elasticsearch([{"""host""": """localhost""", """port""": """9200"""}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=_lowerCAmelCase )
def lowercase ():
__lowerCAmelCase = datasets.load_dataset("""eli5""" , name="""LFQA_reddit""" )
__lowerCAmelCase = elia["""train_eli5"""]
__lowerCAmelCase = np.memmap(
"""eli5_questions_reps.dat""" , dtype="""float32""" , mode="""r""" , shape=(elia_train.num_rows, 128) )
__lowerCAmelCase = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(_lowerCAmelCase )
return (elia_train, eli5_train_q_index)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = load_indexes()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = load_models()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = load_train_data()
def lowercase (_lowerCAmelCase , _lowerCAmelCase=10 ):
__lowerCAmelCase = embed_questions_for_retrieval([question] , _lowerCAmelCase , _lowerCAmelCase )
__lowerCAmelCase , __lowerCAmelCase = eli5_train_q_index.search(_lowerCAmelCase , _lowerCAmelCase )
__lowerCAmelCase = [elia_train[int(_lowerCAmelCase )] for i in I[0]]
return nn_examples
def lowercase (_lowerCAmelCase , _lowerCAmelCase="wiki40b" , _lowerCAmelCase="dense" , _lowerCAmelCase=10 ):
if source == "none":
__lowerCAmelCase , __lowerCAmelCase = (""" <P> """.join(["""""" for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
__lowerCAmelCase , __lowerCAmelCase = query_qa_dense_index(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
else:
__lowerCAmelCase , __lowerCAmelCase = query_es_index(
_lowerCAmelCase , _lowerCAmelCase , index_name="""english_wiki40b_snippets_100w""" , n_results=_lowerCAmelCase , )
__lowerCAmelCase = [
(res["""article_title"""], res["""section_title"""].strip(), res["""score"""], res["""passage_text"""]) for res in hit_lst
]
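    # the generator consumes a single "question: ... context: ..." string built from the retrieved passages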
__lowerCAmelCase = """question: {} context: {}""".format(_lowerCAmelCase , _lowerCAmelCase )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda _lowerCAmelCase : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _lowerCAmelCase : None),
} )
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=64 , _lowerCAmelCase=256 , _lowerCAmelCase=False , _lowerCAmelCase=2 , _lowerCAmelCase=0.95 , _lowerCAmelCase=0.8 ):
with torch.no_grad():
__lowerCAmelCase = qa_sas_generate(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , num_answers=1 , num_beams=_lowerCAmelCase , min_len=_lowerCAmelCase , max_len=_lowerCAmelCase , do_sample=_lowerCAmelCase , temp=_lowerCAmelCase , top_p=_lowerCAmelCase , top_k=_lowerCAmelCase , max_input_length=1024 , device="""cuda:0""" , )[0]
return (answer, support_list)
st.title('''Long Form Question Answering with ELI5''')
# Start sidebar
SCREAMING_SNAKE_CASE_ = '''<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'''
SCREAMING_SNAKE_CASE_ = '''
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class="img-container"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
''' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
SCREAMING_SNAKE_CASE_ = '''
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
'''
st.sidebar.markdown(description, unsafe_allow_html=True)
SCREAMING_SNAKE_CASE_ = [
'''Answer the question''',
'''View the retrieved document only''',
'''View the most similar ELI5 question and answer''',
'''Show me everything, please!''',
]
SCREAMING_SNAKE_CASE_ = st.sidebar.checkbox('''Demo options''')
if demo_options:
SCREAMING_SNAKE_CASE_ = st.sidebar.selectbox(
'''''',
action_list,
index=3,
)
SCREAMING_SNAKE_CASE_ = action_list.index(action_st)
SCREAMING_SNAKE_CASE_ = st.sidebar.selectbox(
'''''',
['''Show full text of passages''', '''Show passage section titles'''],
index=0,
)
SCREAMING_SNAKE_CASE_ = show_type == '''Show full text of passages'''
else:
SCREAMING_SNAKE_CASE_ = 3
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = st.sidebar.checkbox('''Retrieval options''')
if retrieval_options:
SCREAMING_SNAKE_CASE_ = '''
### Information retriever options
The **sparse** retriever uses Elasticsearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) question-answer pairs.
The answer is then generated by a sequence-to-sequence model which takes the question and retrieved document as input.
'''
st.sidebar.markdown(retriever_info)
SCREAMING_SNAKE_CASE_ = st.sidebar.selectbox('''Which Wikipedia format should the model use?''', ['''wiki40b''', '''none'''])
SCREAMING_SNAKE_CASE_ = st.sidebar.selectbox('''Which Wikipedia indexer should the model use?''', ['''dense''', '''sparse''', '''mixed'''])
else:
SCREAMING_SNAKE_CASE_ = '''wiki40b'''
SCREAMING_SNAKE_CASE_ = '''dense'''
SCREAMING_SNAKE_CASE_ = '''beam'''
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = 64
SCREAMING_SNAKE_CASE_ = 256
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = st.sidebar.checkbox('''Generation options''')
if generate_options:
SCREAMING_SNAKE_CASE_ = '''
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can decode with
**beam** search, or **sample** from the decoder\'s output probabilities.
'''
st.sidebar.markdown(generate_info)
SCREAMING_SNAKE_CASE_ = st.sidebar.selectbox('''Would you like to use beam search or sample an answer?''', ['''beam''', '''sampled'''])
SCREAMING_SNAKE_CASE_ = st.sidebar.slider(
'''Minimum generation length''', min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
SCREAMING_SNAKE_CASE_ = st.sidebar.slider(
'''Maximum generation length''', min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
SCREAMING_SNAKE_CASE_ = st.sidebar.slider('''Beam size''', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
SCREAMING_SNAKE_CASE_ = st.sidebar.slider(
'''Nucleus sampling p''', min_value=0.1, max_value=1.0, value=0.9_5, step=0.0_1, format=None, key=None
)
SCREAMING_SNAKE_CASE_ = st.sidebar.slider(
'''Temperature''', min_value=0.1, max_value=1.0, value=0.7, step=0.0_1, format=None, key=None
)
SCREAMING_SNAKE_CASE_ = None
# start main text
SCREAMING_SNAKE_CASE_ = [
'''<MY QUESTION>''',
'''How do people make chocolate?''',
'''Why do we get a fever when we are sick?''',
'''How can different animals perceive different colors?''',
'''What is natural language processing?''',
'''What\'s the best way to treat a sunburn?''',
'''What exactly are vitamins ?''',
'''How does nuclear energy provide electricity?''',
'''What\'s the difference between viruses and bacteria?''',
'''Why are flutes classified as woodwinds when most of them are made out of metal ?''',
'''Why do people like drinking coffee even though it tastes so bad?''',
'''What happens when wine ages? How does it make the wine taste better?''',
'''If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?''',
'''How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?''',
'''How does New Zealand have so many large bird predators?''',
]
SCREAMING_SNAKE_CASE_ = st.selectbox(
'''What would you like to ask? ---- select <MY QUESTION> to enter a new query''',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
SCREAMING_SNAKE_CASE_ = st.text_input('''Enter your question here:''', '''''')
else:
SCREAMING_SNAKE_CASE_ = question_s
if st.button('''Show me!'''):
if action in [0, 1, 3]:
if index_type == "mixed":
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = make_support(question, source=wiki_source, method='''dense''', n_results=10)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = make_support(question, source=wiki_source, method='''sparse''', n_results=10)
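            # merge the dense and sparse hits, dropping duplicates, and keep the top 10 passages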
SCREAMING_SNAKE_CASE_ = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
SCREAMING_SNAKE_CASE_ = support_list[:10]
SCREAMING_SNAKE_CASE_ = '''<P> ''' + ''' <P> '''.join([res[-1] for res in support_list])
else:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == '''sampled'''),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('''### The model generated answer is:''')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('''--- \n ### The model is drawing information from the following Wikipedia passages:''')
for i, res in enumerate(support_list):
SCREAMING_SNAKE_CASE_ = '''https://en.wikipedia.org/wiki/{}'''.format(res[0].replace(''' ''', '''_'''))
SCREAMING_SNAKE_CASE_ = res[1].strip()
if sec_titles == "":
SCREAMING_SNAKE_CASE_ = '''[{}]({})'''.format(res[0], wiki_url)
else:
SCREAMING_SNAKE_CASE_ = sec_titles.split(''' & ''')
SCREAMING_SNAKE_CASE_ = ''' & '''.join(
['''[{}]({}#{})'''.format(sec.strip(), wiki_url, sec.strip().replace(''' ''', '''_''')) for sec in sec_list]
)
st.markdown(
'''{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'''.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'''> <span style="font-family:arial; font-size:10pt;">''' + res[-1] + '''</span>''', unsafe_allow_html=True
)
if action in [2, 3]:
SCREAMING_SNAKE_CASE_ = find_nearest_training(question)
SCREAMING_SNAKE_CASE_ = nn_train_list[0]
st.markdown(
'''--- \n ### The most similar question in the ELI5 training set was: \n\n {}'''.format(train_exple['''title'''])
)
SCREAMING_SNAKE_CASE_ = [
'''{}. {}'''.format(i + 1, ''' \n'''.join([line.strip() for line in ans.split('''\n''') if line.strip() != '''''']))
for i, (ans, sc) in enumerate(zip(train_exple['''answers''']['''text'''], train_exple['''answers''']['''score''']))
if i == 0 or sc > 2
]
st.markdown('''##### Its answers were: \n\n {}'''.format('''\n'''.join(answers_st)))
SCREAMING_SNAKE_CASE_ = '''
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
'''
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 301
| 1
|
"""simple docstring"""
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
set_seed(770)
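# maps parameter names in the original (GPT-style) Bark checkpoints to the names used by the Hugging Face Bark modules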
SCREAMING_SNAKE_CASE_ = {
'''c_attn''': '''att_proj''',
'''c_proj''': '''out_proj''',
'''c_fc''': '''in_proj''',
'''transformer.''': '''''',
'''h.''': '''layers.''',
'''ln_1''': '''layernorm_1''',
'''ln_2''': '''layernorm_2''',
'''ln_f''': '''layernorm_final''',
'''wpe''': '''position_embeds_layer''',
'''wte''': '''input_embeds_layer''',
}
SCREAMING_SNAKE_CASE_ = {
'''text_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text.pt''',
},
'''coarse_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse.pt''',
},
'''fine_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine.pt''',
},
'''text''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text_2.pt''',
},
'''coarse''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse_2.pt''',
},
'''fine''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine_2.pt''',
},
}
SCREAMING_SNAKE_CASE_ = os.path.dirname(os.path.abspath(__file__))
SCREAMING_SNAKE_CASE_ = os.path.join(os.path.expanduser('''~'''), '''.cache''')
SCREAMING_SNAKE_CASE_ = os.path.join(os.getenv('''XDG_CACHE_HOME''', default_cache_dir), '''suno''', '''bark_v0''')
def lowercase (_lowerCAmelCase , _lowerCAmelCase=False ):
__lowerCAmelCase = model_type
if use_small:
key += "_small"
return os.path.join(_lowerCAmelCase , REMOTE_MODEL_PATHS[key]["""file_name"""] )
def lowercase (_lowerCAmelCase , _lowerCAmelCase ):
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
hf_hub_download(repo_id=_lowerCAmelCase , filename=_lowerCAmelCase , local_dir=_lowerCAmelCase )
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase="text" ):
if model_type == "text":
__lowerCAmelCase = BarkSemanticModel
__lowerCAmelCase = BarkSemanticConfig
__lowerCAmelCase = BarkSemanticGenerationConfig
elif model_type == "coarse":
__lowerCAmelCase = BarkCoarseModel
__lowerCAmelCase = BarkCoarseConfig
__lowerCAmelCase = BarkCoarseGenerationConfig
elif model_type == "fine":
__lowerCAmelCase = BarkFineModel
__lowerCAmelCase = BarkFineConfig
__lowerCAmelCase = BarkFineGenerationConfig
else:
raise NotImplementedError()
__lowerCAmelCase = f"""{model_type}_small""" if use_small else model_type
__lowerCAmelCase = REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(_lowerCAmelCase ):
logger.info(f"""{model_type} model not found, downloading into `{CACHE_DIR}`.""" )
_download(model_info["""repo_id"""] , model_info["""file_name"""] )
__lowerCAmelCase = torch.load(_lowerCAmelCase , map_location=_lowerCAmelCase )
# this is a hack
__lowerCAmelCase = checkpoint["""model_args"""]
if "input_vocab_size" not in model_args:
__lowerCAmelCase = model_args["""vocab_size"""]
__lowerCAmelCase = model_args["""vocab_size"""]
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
__lowerCAmelCase = model_args.pop("""n_head""" )
__lowerCAmelCase = model_args.pop("""n_embd""" )
__lowerCAmelCase = model_args.pop("""n_layer""" )
__lowerCAmelCase = ConfigClass(**checkpoint["""model_args"""] )
__lowerCAmelCase = ModelClass(config=_lowerCAmelCase )
__lowerCAmelCase = GenerationConfigClass()
__lowerCAmelCase = model_generation_config
__lowerCAmelCase = checkpoint["""model"""]
# fixup checkpoint
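    # checkpoints saved from a torch.compile()-wrapped model prefix every key with "_orig_mod.", so strip it before renaming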
__lowerCAmelCase = """_orig_mod."""
for k, v in list(state_dict.items() ):
if k.startswith(_lowerCAmelCase ):
# replace part of the key with corresponding layer name in HF implementation
__lowerCAmelCase = k[len(_lowerCAmelCase ) :]
for old_layer_name in new_layer_name_dict:
__lowerCAmelCase = new_k.replace(_lowerCAmelCase , new_layer_name_dict[old_layer_name] )
__lowerCAmelCase = state_dict.pop(_lowerCAmelCase )
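    # ".attn.bias" entries hold the causal-mask buffer rather than learned weights, so they are ignored in the extra/missing key checks below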
__lowerCAmelCase = set(state_dict.keys() ) - set(model.state_dict().keys() )
__lowerCAmelCase = {k for k in extra_keys if not k.endswith(""".attn.bias""" )}
__lowerCAmelCase = set(model.state_dict().keys() ) - set(state_dict.keys() )
__lowerCAmelCase = {k for k in missing_keys if not k.endswith(""".attn.bias""" )}
if len(_lowerCAmelCase ) != 0:
raise ValueError(f"""extra keys found: {extra_keys}""" )
if len(_lowerCAmelCase ) != 0:
raise ValueError(f"""missing keys: {missing_keys}""" )
model.load_state_dict(_lowerCAmelCase , strict=_lowerCAmelCase )
__lowerCAmelCase = model.num_parameters(exclude_embeddings=_lowerCAmelCase )
__lowerCAmelCase = checkpoint["""best_val_loss"""].item()
logger.info(f"""model loaded: {round(n_params/1E6 , 1 )}M params, {round(_lowerCAmelCase , 3 )} loss""" )
model.eval()
model.to(_lowerCAmelCase )
del checkpoint, state_dict
return model
def lowercase (_lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase="text" ):
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
__lowerCAmelCase = """cpu""" # do conversion on cpu
__lowerCAmelCase = _get_ckpt_path(_lowerCAmelCase , use_small=_lowerCAmelCase )
__lowerCAmelCase = _load_model(_lowerCAmelCase , _lowerCAmelCase , model_type=_lowerCAmelCase , use_small=_lowerCAmelCase )
# load bark initial model
__lowerCAmelCase = _bark_load_model(_lowerCAmelCase , """cpu""" , model_type=_lowerCAmelCase , use_small=_lowerCAmelCase )
if model_type == "text":
__lowerCAmelCase = bark_model["""model"""]
if model.num_parameters(exclude_embeddings=_lowerCAmelCase ) != bark_model.get_num_params():
raise ValueError("""initial and new models don't have the same number of parameters""" )
# check if same output as the bark model
__lowerCAmelCase = 5
__lowerCAmelCase = 10
if model_type in ["text", "coarse"]:
__lowerCAmelCase = torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int )
__lowerCAmelCase = bark_model(_lowerCAmelCase )[0]
__lowerCAmelCase = model(_lowerCAmelCase )
# take last logits
__lowerCAmelCase = output_new_model_total.logits[:, [-1], :]
else:
__lowerCAmelCase = 3
__lowerCAmelCase = 8
__lowerCAmelCase = torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
__lowerCAmelCase = model(_lowerCAmelCase , _lowerCAmelCase )
__lowerCAmelCase = bark_model(_lowerCAmelCase , _lowerCAmelCase )
__lowerCAmelCase = output_new_model_total.logits
    # small output differences are expected and should come from differences in the self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError("""initial and new outputs don't have the same shape""" )
if (output_new_model - output_old_model).abs().max().item() > 1E-3:
raise ValueError("""initial and new outputs are not equal""" )
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ):
__lowerCAmelCase = os.path.join(_lowerCAmelCase , _lowerCAmelCase )
__lowerCAmelCase = BarkSemanticConfig.from_pretrained(os.path.join(_lowerCAmelCase , """config.json""" ) )
__lowerCAmelCase = BarkCoarseConfig.from_pretrained(os.path.join(_lowerCAmelCase , """config.json""" ) )
__lowerCAmelCase = BarkFineConfig.from_pretrained(os.path.join(_lowerCAmelCase , """config.json""" ) )
__lowerCAmelCase = EncodecConfig.from_pretrained("""facebook/encodec_24khz""" )
__lowerCAmelCase = BarkSemanticModel.from_pretrained(_lowerCAmelCase )
__lowerCAmelCase = BarkCoarseModel.from_pretrained(_lowerCAmelCase )
__lowerCAmelCase = BarkFineModel.from_pretrained(_lowerCAmelCase )
__lowerCAmelCase = EncodecModel.from_pretrained("""facebook/encodec_24khz""" )
__lowerCAmelCase = BarkConfig.from_sub_model_configs(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
__lowerCAmelCase = BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
__lowerCAmelCase = BarkModel(_lowerCAmelCase )
__lowerCAmelCase = semantic
__lowerCAmelCase = coarseAcoustic
__lowerCAmelCase = fineAcoustic
__lowerCAmelCase = codec
__lowerCAmelCase = bark_generation_config
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
bark.save_pretrained(_lowerCAmelCase , repo_id=_lowerCAmelCase , push_to_hub=_lowerCAmelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''model_type''', type=str, help='''text, coarse or fine.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--is_small''', action='''store_true''', help='''convert the small version instead of the large.''')
SCREAMING_SNAKE_CASE_ = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 301
|
"""simple docstring"""
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
SCREAMING_SNAKE_CASE_ = getLogger(__name__)
SCREAMING_SNAKE_CASE_ = '''cuda''' if torch.cuda.is_available() else '''cpu'''
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 8 , _lowerCAmelCase = DEFAULT_DEVICE , _lowerCAmelCase=False , _lowerCAmelCase="summarization" , _lowerCAmelCase=None , **_lowerCAmelCase , ):
__lowerCAmelCase = Path(_lowerCAmelCase ).open("""w""" , encoding="""utf-8""" )
__lowerCAmelCase = str(_lowerCAmelCase )
__lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(_lowerCAmelCase ).to(_lowerCAmelCase )
if fpaa:
__lowerCAmelCase = model.half()
__lowerCAmelCase = AutoTokenizer.from_pretrained(_lowerCAmelCase )
logger.info(f"""Inferred tokenizer type: {tokenizer.__class__}""" ) # if this is wrong, check config.model_type.
__lowerCAmelCase = time.time()
# update config with task specific params
use_task_specific_params(_lowerCAmelCase , _lowerCAmelCase )
if prefix is None:
__lowerCAmelCase = prefix or getattr(model.config , """prefix""" , """""" ) or """"""
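    # generate in chunks of the batch size, prepending the task-specific prefix (e.g. "summarize: " for T5) to every source line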
for examples_chunk in tqdm(list(chunks(_lowerCAmelCase , _lowerCAmelCase ) ) ):
__lowerCAmelCase = [prefix + text for text in examples_chunk]
__lowerCAmelCase = tokenizer(_lowerCAmelCase , return_tensors="""pt""" , truncation=_lowerCAmelCase , padding="""longest""" ).to(_lowerCAmelCase )
__lowerCAmelCase = model.generate(
input_ids=batch.input_ids , attention_mask=batch.attention_mask , **_lowerCAmelCase , )
__lowerCAmelCase = tokenizer.batch_decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase , clean_up_tokenization_spaces=_lowerCAmelCase )
for hypothesis in dec:
fout.write(hypothesis + """\n""" )
fout.flush()
fout.close()
__lowerCAmelCase = int(time.time() - start_time ) # seconds
__lowerCAmelCase = len(_lowerCAmelCase )
return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )}
def lowercase ():
return datetime.datetime.now().strftime("""%Y-%m-%d %H:%M:%S""" )
def lowercase (_lowerCAmelCase=True ):
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("""model_name""" , type=_lowerCAmelCase , help="""like facebook/bart-large-cnn,t5-base, etc.""" )
parser.add_argument("""input_path""" , type=_lowerCAmelCase , help="""like cnn_dm/test.source""" )
parser.add_argument("""save_path""" , type=_lowerCAmelCase , help="""where to save summaries""" )
parser.add_argument("""--reference_path""" , type=_lowerCAmelCase , required=_lowerCAmelCase , help="""like cnn_dm/test.target""" )
parser.add_argument("""--score_path""" , type=_lowerCAmelCase , required=_lowerCAmelCase , default="""metrics.json""" , help="""where to save metrics""" )
parser.add_argument("""--device""" , type=_lowerCAmelCase , required=_lowerCAmelCase , default=_lowerCAmelCase , help="""cuda, cuda:1, cpu etc.""" )
parser.add_argument(
"""--prefix""" , type=_lowerCAmelCase , required=_lowerCAmelCase , default=_lowerCAmelCase , help="""will be added to the begininng of src examples""" )
parser.add_argument("""--task""" , type=_lowerCAmelCase , default="""summarization""" , help="""used for task_specific_params + metrics""" )
parser.add_argument("""--bs""" , type=_lowerCAmelCase , default=8 , required=_lowerCAmelCase , help="""batch size""" )
parser.add_argument(
"""--n_obs""" , type=_lowerCAmelCase , default=-1 , required=_lowerCAmelCase , help="""How many observations. Defaults to all.""" )
parser.add_argument("""--fp16""" , action="""store_true""" )
parser.add_argument("""--dump-args""" , action="""store_true""" , help="""print the custom hparams with the results""" )
parser.add_argument(
"""--info""" , nargs="""?""" , type=_lowerCAmelCase , const=datetime_now() , help=(
"""use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."""
""" lang=en-ru. If no value is passed, the current datetime string will be used."""
) , )
# Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
__lowerCAmelCase , __lowerCAmelCase = parser.parse_known_args()
__lowerCAmelCase = parse_numeric_n_bool_cl_kwargs(_lowerCAmelCase )
if parsed_args and verbose:
print(f"""parsed the following generate kwargs: {parsed_args}""" )
__lowerCAmelCase = [""" """ + x.rstrip() if """t5""" in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
if args.n_obs > 0:
__lowerCAmelCase = examples[: args.n_obs]
Path(args.save_path ).parent.mkdir(exist_ok=_lowerCAmelCase )
if args.reference_path is None and Path(args.score_path ).exists():
warnings.warn(f"""score_path {args.score_path} will be overwritten unless you type ctrl-c.""" )
if args.device == "cpu" and args.fpaa:
# this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
raise ValueError("""Can't mix --fp16 and --device cpu""" )
__lowerCAmelCase = generate_summaries_or_translations(
_lowerCAmelCase , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fpaa , task=args.task , prefix=args.prefix , **_lowerCAmelCase , )
if args.reference_path is None:
return {}
# Compute scores
__lowerCAmelCase = calculate_bleu if """translation""" in args.task else calculate_rouge
__lowerCAmelCase = [x.rstrip() for x in open(args.save_path ).readlines()]
__lowerCAmelCase = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(_lowerCAmelCase )]
__lowerCAmelCase = score_fn(_lowerCAmelCase , _lowerCAmelCase )
scores.update(_lowerCAmelCase )
if args.dump_args:
scores.update(_lowerCAmelCase )
if args.info:
__lowerCAmelCase = args.info
if verbose:
print(_lowerCAmelCase )
if args.score_path is not None:
json.dump(_lowerCAmelCase , open(args.score_path , """w""" ) )
return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
| 301
| 1
|
"""simple docstring"""
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@property
def A__ ( self ) -> List[Any]:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def A__ ( self ) -> Tuple:
__lowerCAmelCase = ort.SessionOptions()
__lowerCAmelCase = False
return options
def A__ ( self ) -> Tuple:
__lowerCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
__lowerCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
__lowerCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy""" )
# using the PNDM scheduler by default
__lowerCAmelCase = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=snake_case_ , feature_extractor=snake_case_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=snake_case_ )
__lowerCAmelCase = """A red cat sitting on a park bench"""
__lowerCAmelCase = np.random.RandomState(0 )
__lowerCAmelCase = pipe(
prompt=snake_case_ , image=snake_case_ , mask_image=snake_case_ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 , generator=snake_case_ , output_type="""np""" , )
__lowerCAmelCase = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1e-2
| 301
|
"""simple docstring"""
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def lowercase (_lowerCAmelCase , _lowerCAmelCase="shi-labs/oneformer_demo" ):
with open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) as f:
__lowerCAmelCase = json.load(_lowerCAmelCase )
__lowerCAmelCase = {}
__lowerCAmelCase = []
__lowerCAmelCase = []
for key, info in class_info.items():
__lowerCAmelCase = info["""name"""]
class_names.append(info["""name"""] )
if info["isthing"]:
thing_ids.append(int(_lowerCAmelCase ) )
__lowerCAmelCase = thing_ids
__lowerCAmelCase = class_names
return metadata
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_=7 , snake_case_=3 , snake_case_=30 , snake_case_=400 , snake_case_=None , snake_case_=True , snake_case_=True , snake_case_=[0.5, 0.5, 0.5] , snake_case_=[0.5, 0.5, 0.5] , snake_case_=10 , snake_case_=False , snake_case_=255 , snake_case_="shi-labs/oneformer_demo" , snake_case_="ade20k_panoptic.json" , snake_case_=10 , ) -> Union[str, Any]:
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = min_resolution
__lowerCAmelCase = max_resolution
__lowerCAmelCase = do_resize
__lowerCAmelCase = {"""shortest_edge""": 32, """longest_edge""": 1_333} if size is None else size
__lowerCAmelCase = do_normalize
__lowerCAmelCase = image_mean
__lowerCAmelCase = image_std
__lowerCAmelCase = class_info_file
__lowerCAmelCase = prepare_metadata(snake_case_ , snake_case_ )
__lowerCAmelCase = num_text
__lowerCAmelCase = repo_path
# for the post_process_functions
__lowerCAmelCase = 2
__lowerCAmelCase = 10
__lowerCAmelCase = 10
__lowerCAmelCase = 3
__lowerCAmelCase = 4
__lowerCAmelCase = num_labels
__lowerCAmelCase = do_reduce_labels
__lowerCAmelCase = ignore_index
def A__ ( self ) -> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def A__ ( self , snake_case_ , snake_case_=False ) -> Dict:
if not batched:
__lowerCAmelCase = image_inputs[0]
if isinstance(snake_case_ , Image.Image ):
__lowerCAmelCase , __lowerCAmelCase = image.size
else:
__lowerCAmelCase , __lowerCAmelCase = image.shape[1], image.shape[2]
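            # the processor resizes so that the shorter side equals size["shortest_edge"] while preserving the aspect ratio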
if w < h:
__lowerCAmelCase = int(self.size["""shortest_edge"""] * h / w )
__lowerCAmelCase = self.size["""shortest_edge"""]
elif w > h:
__lowerCAmelCase = self.size["""shortest_edge"""]
__lowerCAmelCase = int(self.size["""shortest_edge"""] * w / h )
else:
__lowerCAmelCase = self.size["""shortest_edge"""]
__lowerCAmelCase = self.size["""shortest_edge"""]
else:
__lowerCAmelCase = []
for image in image_inputs:
__lowerCAmelCase , __lowerCAmelCase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__lowerCAmelCase = max(snake_case_ , key=lambda snake_case_ : item[0] )[0]
__lowerCAmelCase = max(snake_case_ , key=lambda snake_case_ : item[1] )[1]
return expected_height, expected_width
def A__ ( self ) -> Tuple:
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class lowerCAmelCase_ ( A__ , unittest.TestCase ):
'''simple docstring'''
_snake_case = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
_snake_case = image_processing_class
def A__ ( self ) -> str:
__lowerCAmelCase = OneFormerImageProcessorTester(self )
@property
def A__ ( self ) -> Dict:
return self.image_processing_tester.prepare_image_processor_dict()
def A__ ( self ) -> Union[str, Any]:
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case_ , """image_mean""" ) )
self.assertTrue(hasattr(snake_case_ , """image_std""" ) )
self.assertTrue(hasattr(snake_case_ , """do_normalize""" ) )
self.assertTrue(hasattr(snake_case_ , """do_resize""" ) )
self.assertTrue(hasattr(snake_case_ , """size""" ) )
self.assertTrue(hasattr(snake_case_ , """ignore_index""" ) )
self.assertTrue(hasattr(snake_case_ , """class_info_file""" ) )
self.assertTrue(hasattr(snake_case_ , """num_text""" ) )
self.assertTrue(hasattr(snake_case_ , """repo_path""" ) )
self.assertTrue(hasattr(snake_case_ , """metadata""" ) )
self.assertTrue(hasattr(snake_case_ , """do_reduce_labels""" ) )
def A__ ( self ) -> List[str]:
pass
def A__ ( self ) -> Union[str, Any]:
# Initialize image_processor
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCAmelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , Image.Image )
# Test not batched input
__lowerCAmelCase = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
__lowerCAmelCase , __lowerCAmelCase = self.image_processing_tester.get_expected_values(snake_case_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCAmelCase , __lowerCAmelCase = self.image_processing_tester.get_expected_values(snake_case_ , batched=snake_case_ )
__lowerCAmelCase = image_processor(
snake_case_ , ["""semantic"""] * len(snake_case_ ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A__ ( self ) -> List[str]:
# Initialize image_processor
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowerCAmelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case_ , numpify=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , np.ndarray )
# Test not batched input
__lowerCAmelCase = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
__lowerCAmelCase , __lowerCAmelCase = self.image_processing_tester.get_expected_values(snake_case_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCAmelCase , __lowerCAmelCase = self.image_processing_tester.get_expected_values(snake_case_ , batched=snake_case_ )
__lowerCAmelCase = image_processor(
snake_case_ , ["""semantic"""] * len(snake_case_ ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A__ ( self ) -> Tuple:
# Initialize image_processor
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowerCAmelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case_ , torchify=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , torch.Tensor )
# Test not batched input
__lowerCAmelCase = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
__lowerCAmelCase , __lowerCAmelCase = self.image_processing_tester.get_expected_values(snake_case_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCAmelCase , __lowerCAmelCase = self.image_processing_tester.get_expected_values(snake_case_ , batched=snake_case_ )
__lowerCAmelCase = image_processor(
snake_case_ , ["""semantic"""] * len(snake_case_ ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A__ ( self , snake_case_=False , snake_case_=False , snake_case_="np" ) -> Optional[Any]:
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
__lowerCAmelCase = self.image_processing_tester.num_labels
__lowerCAmelCase = None
__lowerCAmelCase = None
__lowerCAmelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case_ )
if with_segmentation_maps:
__lowerCAmelCase = num_labels
if is_instance_map:
__lowerCAmelCase = list(range(snake_case_ ) ) * 2
__lowerCAmelCase = dict(enumerate(snake_case_ ) )
__lowerCAmelCase = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
__lowerCAmelCase = [Image.fromarray(snake_case_ ) for annotation in annotations]
__lowerCAmelCase = image_processor(
snake_case_ , ["""semantic"""] * len(snake_case_ ) , snake_case_ , return_tensors="""pt""" , instance_id_to_semantic_id=snake_case_ , pad_and_return_pixel_mask=snake_case_ , )
return inputs
def A__ ( self ) -> List[str]:
pass
def A__ ( self ) -> Optional[Any]:
def common(snake_case_=False , snake_case_=None ):
__lowerCAmelCase = self.comm_get_image_processor_inputs(
with_segmentation_maps=snake_case_ , is_instance_map=snake_case_ , segmentation_type=snake_case_ )
__lowerCAmelCase = inputs["""mask_labels"""]
__lowerCAmelCase = inputs["""class_labels"""]
__lowerCAmelCase = inputs["""pixel_values"""]
__lowerCAmelCase = inputs["""text_inputs"""]
# check the batch_size
for mask_label, class_label, text_input in zip(snake_case_ , snake_case_ , snake_case_ ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(snake_case_ ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=snake_case_ )
common(is_instance_map=snake_case_ , segmentation_type="""pil""" )
common(is_instance_map=snake_case_ , segmentation_type="""pil""" )
def A__ ( self ) -> Optional[int]:
__lowerCAmelCase = np.zeros((20, 50) )
__lowerCAmelCase = 1
__lowerCAmelCase = 1
__lowerCAmelCase = 1
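        # binary_mask_to_rle returns a flat list of alternating (start, length) run pairs over the flattened mask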
__lowerCAmelCase = binary_mask_to_rle(snake_case_ )
self.assertEqual(len(snake_case_ ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
def A__ ( self ) -> Optional[Any]:
__lowerCAmelCase = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
__lowerCAmelCase = self.image_processing_tester.get_fake_oneformer_outputs()
__lowerCAmelCase = fature_extractor.post_process_semantic_segmentation(snake_case_ )
self.assertEqual(len(snake_case_ ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
__lowerCAmelCase = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
__lowerCAmelCase = fature_extractor.post_process_semantic_segmentation(snake_case_ , target_sizes=snake_case_ )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def A__ ( self ) -> Union[str, Any]:
__lowerCAmelCase = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
__lowerCAmelCase = self.image_processing_tester.get_fake_oneformer_outputs()
__lowerCAmelCase = image_processor.post_process_instance_segmentation(snake_case_ , threshold=0 )
self.assertTrue(len(snake_case_ ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("""segmentation""" in el )
self.assertTrue("""segments_info""" in el )
self.assertEqual(type(el["""segments_info"""] ) , snake_case_ )
self.assertEqual(
el["""segmentation"""].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def A__ ( self ) -> Union[str, Any]:
__lowerCAmelCase = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
__lowerCAmelCase = self.image_processing_tester.get_fake_oneformer_outputs()
__lowerCAmelCase = image_processor.post_process_panoptic_segmentation(snake_case_ , threshold=0 )
self.assertTrue(len(snake_case_ ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("""segmentation""" in el )
self.assertTrue("""segments_info""" in el )
self.assertEqual(type(el["""segments_info"""] ) , snake_case_ )
self.assertEqual(
el["""segmentation"""].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
| 301
| 1
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# See all MVP models at https://huggingface.co/models?filter=mvp
SCREAMING_SNAKE_CASE_ = {
'''vocab_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json''',
},
'''added_tokens.json''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json''',
},
'''merges_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json''',
},
}
SCREAMING_SNAKE_CASE_ = {
'''RUCAIBox/mvp''': 1_024,
}
class lowerCAmelCase_ ( A__ ):
'''simple docstring'''
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case = ['''input_ids''', '''attention_mask''']
_snake_case = MvpTokenizer
def __init__( self , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_="replace" , snake_case_="<s>" , snake_case_="</s>" , snake_case_="</s>" , snake_case_="<s>" , snake_case_="<unk>" , snake_case_="<pad>" , snake_case_="<mask>" , snake_case_=False , snake_case_=True , **snake_case_ , ) -> Optional[Any]:
super().__init__(
snake_case_ , snake_case_ , tokenizer_file=snake_case_ , errors=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , sep_token=snake_case_ , cls_token=snake_case_ , unk_token=snake_case_ , pad_token=snake_case_ , mask_token=snake_case_ , add_prefix_space=snake_case_ , trim_offsets=snake_case_ , **snake_case_ , )
__lowerCAmelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , snake_case_ ) != add_prefix_space:
__lowerCAmelCase = getattr(snake_case_ , pre_tok_state.pop("""type""" ) )
__lowerCAmelCase = add_prefix_space
__lowerCAmelCase = pre_tok_class(**snake_case_ )
__lowerCAmelCase = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
__lowerCAmelCase = """post_processor"""
__lowerCAmelCase = getattr(self.backend_tokenizer , snake_case_ , snake_case_ )
if tokenizer_component_instance:
__lowerCAmelCase = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
__lowerCAmelCase = tuple(state["""sep"""] )
if "cls" in state:
__lowerCAmelCase = tuple(state["""cls"""] )
__lowerCAmelCase = False
if state.get("""add_prefix_space""" , snake_case_ ) != add_prefix_space:
__lowerCAmelCase = add_prefix_space
__lowerCAmelCase = True
if state.get("""trim_offsets""" , snake_case_ ) != trim_offsets:
__lowerCAmelCase = trim_offsets
__lowerCAmelCase = True
if changes_to_apply:
__lowerCAmelCase = getattr(snake_case_ , state.pop("""type""" ) )
__lowerCAmelCase = component_class(**snake_case_ )
setattr(self.backend_tokenizer , snake_case_ , snake_case_ )
@property
def A__ ( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def A__ ( self , snake_case_ ) -> Tuple:
__lowerCAmelCase = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else value
__lowerCAmelCase = value
def A__ ( self , *snake_case_ , **snake_case_ ) -> BatchEncoding:
__lowerCAmelCase = kwargs.get("""is_split_into_words""" , snake_case_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"""to use it with pretokenized inputs.""" )
return super()._batch_encode_plus(*snake_case_ , **snake_case_ )
def A__ ( self , *snake_case_ , **snake_case_ ) -> BatchEncoding:
__lowerCAmelCase = kwargs.get("""is_split_into_words""" , snake_case_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"""to use it with pretokenized inputs.""" )
return super()._encode_plus(*snake_case_ , **snake_case_ )
def A__ ( self , snake_case_ , snake_case_ = None ) -> Tuple[str]:
__lowerCAmelCase = self._tokenizer.model.save(snake_case_ , name=snake_case_ )
return tuple(snake_case_ )
def A__ ( self , snake_case_ , snake_case_=None ) -> Dict:
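        # BART-style special tokens: a single sequence becomes <s> A </s>, a pair becomes <s> A </s> </s> B </s>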
__lowerCAmelCase = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def A__ ( self , snake_case_ , snake_case_ = None ) -> List[int]:
__lowerCAmelCase = [self.sep_token_id]
__lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 301
|
"""simple docstring"""
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Base model mapping
('''albert''', '''FlaxAlbertModel'''),
('''bart''', '''FlaxBartModel'''),
('''beit''', '''FlaxBeitModel'''),
('''bert''', '''FlaxBertModel'''),
('''big_bird''', '''FlaxBigBirdModel'''),
('''blenderbot''', '''FlaxBlenderbotModel'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallModel'''),
('''clip''', '''FlaxCLIPModel'''),
('''distilbert''', '''FlaxDistilBertModel'''),
('''electra''', '''FlaxElectraModel'''),
('''gpt-sw3''', '''FlaxGPT2Model'''),
('''gpt2''', '''FlaxGPT2Model'''),
('''gpt_neo''', '''FlaxGPTNeoModel'''),
('''gptj''', '''FlaxGPTJModel'''),
('''longt5''', '''FlaxLongT5Model'''),
('''marian''', '''FlaxMarianModel'''),
('''mbart''', '''FlaxMBartModel'''),
('''mt5''', '''FlaxMT5Model'''),
('''opt''', '''FlaxOPTModel'''),
('''pegasus''', '''FlaxPegasusModel'''),
('''regnet''', '''FlaxRegNetModel'''),
('''resnet''', '''FlaxResNetModel'''),
('''roberta''', '''FlaxRobertaModel'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormModel'''),
('''roformer''', '''FlaxRoFormerModel'''),
('''t5''', '''FlaxT5Model'''),
('''vision-text-dual-encoder''', '''FlaxVisionTextDualEncoderModel'''),
('''vit''', '''FlaxViTModel'''),
('''wav2vec2''', '''FlaxWav2Vec2Model'''),
('''whisper''', '''FlaxWhisperModel'''),
('''xglm''', '''FlaxXGLMModel'''),
('''xlm-roberta''', '''FlaxXLMRobertaModel'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for pre-training mapping
('''albert''', '''FlaxAlbertForPreTraining'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForPreTraining'''),
('''big_bird''', '''FlaxBigBirdForPreTraining'''),
('''electra''', '''FlaxElectraForPreTraining'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
('''wav2vec2''', '''FlaxWav2Vec2ForPreTraining'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Masked LM mapping
('''albert''', '''FlaxAlbertForMaskedLM'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForMaskedLM'''),
('''big_bird''', '''FlaxBigBirdForMaskedLM'''),
('''distilbert''', '''FlaxDistilBertForMaskedLM'''),
('''electra''', '''FlaxElectraForMaskedLM'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''blenderbot''', '''FlaxBlenderbotForConditionalGeneration'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallForConditionalGeneration'''),
('''encoder-decoder''', '''FlaxEncoderDecoderModel'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''marian''', '''FlaxMarianMTModel'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''pegasus''', '''FlaxPegasusForConditionalGeneration'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
        # Model for Image Classification mapping
('''beit''', '''FlaxBeitForImageClassification'''),
('''regnet''', '''FlaxRegNetForImageClassification'''),
('''resnet''', '''FlaxResNetForImageClassification'''),
('''vit''', '''FlaxViTForImageClassification'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
('''vision-encoder-decoder''', '''FlaxVisionEncoderDecoderModel'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Causal LM mapping
('''bart''', '''FlaxBartForCausalLM'''),
('''bert''', '''FlaxBertForCausalLM'''),
('''big_bird''', '''FlaxBigBirdForCausalLM'''),
('''electra''', '''FlaxElectraForCausalLM'''),
('''gpt-sw3''', '''FlaxGPT2LMHeadModel'''),
('''gpt2''', '''FlaxGPT2LMHeadModel'''),
('''gpt_neo''', '''FlaxGPTNeoForCausalLM'''),
('''gptj''', '''FlaxGPTJForCausalLM'''),
('''opt''', '''FlaxOPTForCausalLM'''),
('''roberta''', '''FlaxRobertaForCausalLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForCausalLM'''),
('''xglm''', '''FlaxXGLMForCausalLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForCausalLM'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Sequence Classification mapping
('''albert''', '''FlaxAlbertForSequenceClassification'''),
('''bart''', '''FlaxBartForSequenceClassification'''),
('''bert''', '''FlaxBertForSequenceClassification'''),
('''big_bird''', '''FlaxBigBirdForSequenceClassification'''),
('''distilbert''', '''FlaxDistilBertForSequenceClassification'''),
('''electra''', '''FlaxElectraForSequenceClassification'''),
('''mbart''', '''FlaxMBartForSequenceClassification'''),
('''roberta''', '''FlaxRobertaForSequenceClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForSequenceClassification'''),
('''roformer''', '''FlaxRoFormerForSequenceClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForSequenceClassification'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Question Answering mapping
('''albert''', '''FlaxAlbertForQuestionAnswering'''),
('''bart''', '''FlaxBartForQuestionAnswering'''),
('''bert''', '''FlaxBertForQuestionAnswering'''),
('''big_bird''', '''FlaxBigBirdForQuestionAnswering'''),
('''distilbert''', '''FlaxDistilBertForQuestionAnswering'''),
('''electra''', '''FlaxElectraForQuestionAnswering'''),
('''mbart''', '''FlaxMBartForQuestionAnswering'''),
('''roberta''', '''FlaxRobertaForQuestionAnswering'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForQuestionAnswering'''),
('''roformer''', '''FlaxRoFormerForQuestionAnswering'''),
('''xlm-roberta''', '''FlaxXLMRobertaForQuestionAnswering'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Token Classification mapping
('''albert''', '''FlaxAlbertForTokenClassification'''),
('''bert''', '''FlaxBertForTokenClassification'''),
('''big_bird''', '''FlaxBigBirdForTokenClassification'''),
('''distilbert''', '''FlaxDistilBertForTokenClassification'''),
('''electra''', '''FlaxElectraForTokenClassification'''),
('''roberta''', '''FlaxRobertaForTokenClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForTokenClassification'''),
('''roformer''', '''FlaxRoFormerForTokenClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForTokenClassification'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Multiple Choice mapping
('''albert''', '''FlaxAlbertForMultipleChoice'''),
('''bert''', '''FlaxBertForMultipleChoice'''),
('''big_bird''', '''FlaxBigBirdForMultipleChoice'''),
('''distilbert''', '''FlaxDistilBertForMultipleChoice'''),
('''electra''', '''FlaxElectraForMultipleChoice'''),
('''roberta''', '''FlaxRobertaForMultipleChoice'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMultipleChoice'''),
('''roformer''', '''FlaxRoFormerForMultipleChoice'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMultipleChoice'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
('''bert''', '''FlaxBertForNextSentencePrediction'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
('''speech-encoder-decoder''', '''FlaxSpeechEncoderDecoderModel'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
('''whisper''', '''FlaxWhisperForAudioClassification'''),
]
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModel)
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_PRETRAINING_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForPreTraining, head_doc='''pretraining''')
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForCausalLM, head_doc='''causal language modeling''')
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_MASKED_LM_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='''masked language modeling''')
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc='''sequence-to-sequence language modeling''', checkpoint_for_example='''t5-base'''
)
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc='''sequence classification'''
)
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='''question answering''')
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc='''token classification'''
)
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='''multiple choice''')
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc='''next sentence prediction'''
)
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(
FlaxAutoModelForImageClassification, head_doc='''image classification'''
)
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='''vision-to-text modeling''')
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc='''sequence-to-sequence speech-to-text modeling'''
)
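# Illustrative usage sketch (assumption: the classes above mirror the public
# `transformers` Flax auto-class API; the obfuscated names in this file are placeholders).
# Each auto class looks up a checkpoint's config type in its mapping and instantiates the
# matching Flax model with the corresponding task head.
if __name__ == "__main__":
    from transformers import FlaxAutoModelForSequenceClassification

    # Per the sequence-classification mapping above, a RoBERTa checkpoint resolves to
    # FlaxRobertaForSequenceClassification (the classification head is newly initialized).
    flax_model = FlaxAutoModelForSequenceClassification.from_pretrained("roberta-base")
    print(type(flax_model).__name__)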
| 301
| 1
|
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=A__ )
class lowerCAmelCase_ ( A__ ):
'''simple docstring'''
_snake_case = field(default='''automatic-speech-recognition''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
_snake_case = Features({'''audio''': Audio()} )
_snake_case = Features({'''transcription''': Value('''string''' )} )
_snake_case = "audio"
_snake_case = "transcription"
def A__ ( self , snake_case_ ) -> Optional[int]:
if self.audio_column not in features:
raise ValueError(f"""Column {self.audio_column} is not present in features.""" )
if not isinstance(features[self.audio_column] , snake_case_ ):
raise ValueError(f"""Column {self.audio_column} is not an Audio type.""" )
__lowerCAmelCase = copy.deepcopy(self )
__lowerCAmelCase = self.input_schema.copy()
__lowerCAmelCase = features[self.audio_column]
__lowerCAmelCase = input_schema
return task_template
@property
def A__ ( self ) -> Dict[str, str]:
return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 301
|
"""simple docstring"""
from __future__ import annotations
def lowercase (_lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = []
create_all_state(1 , _lowerCAmelCase , _lowerCAmelCase , [] , _lowerCAmelCase )
return result
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ):
if level == 0:
total_list.append(current_list[:] )
return
for i in range(_lowerCAmelCase , total_number - level + 2 ):
current_list.append(_lowerCAmelCase )
create_all_state(i + 1 , _lowerCAmelCase , level - 1 , _lowerCAmelCase , _lowerCAmelCase )
current_list.pop()
def lowercase (_lowerCAmelCase ):
for i in total_list:
print(*_lowerCAmelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = generate_all_combinations(n, k)
print_all_state(total_list)
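# Standalone cross-check (sketch): the recursive backtracking above enumerates every
# k-element combination of 1..n in lexicographic order, matching itertools.combinations.
if __name__ == "__main__":
    from itertools import combinations

    n_check, k_check = 4, 2
    print([list(c) for c in combinations(range(1, n_check + 1), k_check)])
    # [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]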
| 301
| 1
|
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'''microsoft/wavlm-base''': '''https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json''',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class lowerCAmelCase_ ( A__ ):
'''simple docstring'''
_snake_case = '''wavlm'''
def __init__( self , snake_case_=32 , snake_case_=768 , snake_case_=12 , snake_case_=12 , snake_case_=3_072 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.0 , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.02 , snake_case_=1e-5 , snake_case_="group" , snake_case_="gelu" , snake_case_=(512, 512, 512, 512, 512, 512, 512) , snake_case_=(5, 2, 2, 2, 2, 2, 2) , snake_case_=(10, 3, 3, 3, 3, 2, 2) , snake_case_=False , snake_case_=128 , snake_case_=16 , snake_case_=320 , snake_case_=800 , snake_case_=False , snake_case_=True , snake_case_=0.05 , snake_case_=10 , snake_case_=2 , snake_case_=0.0 , snake_case_=10 , snake_case_=320 , snake_case_=2 , snake_case_=0.1 , snake_case_=100 , snake_case_=256 , snake_case_=256 , snake_case_=0.1 , snake_case_="mean" , snake_case_=False , snake_case_=False , snake_case_=256 , snake_case_=(512, 512, 512, 512, 1_500) , snake_case_=(5, 3, 3, 1, 1) , snake_case_=(1, 2, 3, 1, 1) , snake_case_=512 , snake_case_=80 , snake_case_=0 , snake_case_=1 , snake_case_=2 , snake_case_=False , snake_case_=3 , snake_case_=2 , snake_case_=3 , snake_case_=None , **snake_case_ , ) -> Dict:
super().__init__(**snake_case_ , pad_token_id=snake_case_ , bos_token_id=snake_case_ , eos_token_id=snake_case_ )
__lowerCAmelCase = hidden_size
__lowerCAmelCase = feat_extract_norm
__lowerCAmelCase = feat_extract_activation
__lowerCAmelCase = list(snake_case_ )
__lowerCAmelCase = list(snake_case_ )
__lowerCAmelCase = list(snake_case_ )
__lowerCAmelCase = conv_bias
__lowerCAmelCase = num_buckets
__lowerCAmelCase = max_bucket_distance
__lowerCAmelCase = num_conv_pos_embeddings
__lowerCAmelCase = num_conv_pos_embedding_groups
__lowerCAmelCase = len(self.conv_dim )
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = hidden_dropout
__lowerCAmelCase = attention_dropout
__lowerCAmelCase = activation_dropout
__lowerCAmelCase = feat_proj_dropout
__lowerCAmelCase = final_dropout
__lowerCAmelCase = layerdrop
__lowerCAmelCase = layer_norm_eps
__lowerCAmelCase = initializer_range
__lowerCAmelCase = num_ctc_classes
__lowerCAmelCase = vocab_size
__lowerCAmelCase = do_stable_layer_norm
__lowerCAmelCase = use_weighted_layer_sum
__lowerCAmelCase = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__lowerCAmelCase = apply_spec_augment
__lowerCAmelCase = mask_time_prob
__lowerCAmelCase = mask_time_length
__lowerCAmelCase = mask_time_min_masks
__lowerCAmelCase = mask_feature_prob
__lowerCAmelCase = mask_feature_length
# parameters for pretraining with codevector quantized representations
__lowerCAmelCase = num_codevectors_per_group
__lowerCAmelCase = num_codevector_groups
__lowerCAmelCase = contrastive_logits_temperature
__lowerCAmelCase = num_negatives
__lowerCAmelCase = codevector_dim
__lowerCAmelCase = proj_codevector_dim
__lowerCAmelCase = diversity_loss_weight
# ctc loss
__lowerCAmelCase = ctc_loss_reduction
__lowerCAmelCase = ctc_zero_infinity
# adapter
__lowerCAmelCase = add_adapter
__lowerCAmelCase = adapter_kernel_size
__lowerCAmelCase = adapter_stride
__lowerCAmelCase = num_adapter_layers
__lowerCAmelCase = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__lowerCAmelCase = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__lowerCAmelCase = list(snake_case_ )
__lowerCAmelCase = list(snake_case_ )
__lowerCAmelCase = list(snake_case_ )
__lowerCAmelCase = xvector_output_dim
@property
def A__ ( self ) -> Dict:
return functools.reduce(operator.mul , self.conv_stride , 1 )
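# Illustrative sketch (assumption: the class above mirrors the upstream
# `transformers.WavLMConfig`, where the final property is exposed as `inputs_to_logits_ratio`).
# The property multiplies the convolutional strides, so the default
# (5, 2, 2, 2, 2, 2, 2) gives 320 input samples per output frame.
if __name__ == "__main__":
    from transformers import WavLMConfig

    cfg = WavLMConfig()
    print(cfg.inputs_to_logits_ratio)  # 320 with the default conv_stride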
| 301
|
"""simple docstring"""
import os
from pathlib import Path
def lowercase ():
from torch.utils.cpp_extension import load
__lowerCAmelCase = Path(_lowerCAmelCase ).resolve().parent.parent.parent / """kernels""" / """deformable_detr"""
__lowerCAmelCase = [
root / filename
for filename in [
"""vision.cpp""",
os.path.join("""cpu""" , """ms_deform_attn_cpu.cpp""" ),
os.path.join("""cuda""" , """ms_deform_attn_cuda.cu""" ),
]
]
load(
"""MultiScaleDeformableAttention""" , _lowerCAmelCase , with_cuda=_lowerCAmelCase , extra_include_paths=[str(_lowerCAmelCase )] , extra_cflags=["""-DWITH_CUDA=1"""] , extra_cuda_cflags=[
"""-DCUDA_HAS_FP16=1""",
"""-D__CUDA_NO_HALF_OPERATORS__""",
"""-D__CUDA_NO_HALF_CONVERSIONS__""",
"""-D__CUDA_NO_HALF2_OPERATORS__""",
] , )
import MultiScaleDeformableAttention as MSDA
return MSDA
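# Note: torch.utils.cpp_extension.load JIT-compiles the listed C++/CUDA sources on the
# first call (a CUDA toolchain with nvcc and a host C++ compiler must be available) and
# caches the resulting extension, so later calls reuse the prebuilt module.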
| 301
| 1
|
"""simple docstring"""
def lowercase (_lowerCAmelCase = 1000 ):
__lowerCAmelCase = 3
__lowerCAmelCase = 0
while a < n:
if a % 3 == 0 or a % 5 == 0:
result += a
        elif a % 15 == 0:  # note: unreachable, since any multiple of 15 already matches a % 3 == 0 above
            result -= a
a += 1
return result
if __name__ == "__main__":
print(F"{solution() = }")
| 301
|
"""simple docstring"""
from __future__ import annotations
from statistics import mean
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = [0] * no_of_processes
__lowerCAmelCase = [0] * no_of_processes
# Initialize remaining_time to waiting_time.
for i in range(_lowerCAmelCase ):
__lowerCAmelCase = burst_time[i]
__lowerCAmelCase = []
__lowerCAmelCase = 0
__lowerCAmelCase = 0
    # While processes remain incomplete, every process whose arrival time has passed
    # and which still has remaining execution time is put into ready_process.
    # The shortest process in ready_process, target_process, is executed next.
while completed != no_of_processes:
__lowerCAmelCase = []
__lowerCAmelCase = -1
for i in range(_lowerCAmelCase ):
if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
ready_process.append(_lowerCAmelCase )
if len(_lowerCAmelCase ) > 0:
__lowerCAmelCase = ready_process[0]
for i in ready_process:
if remaining_time[i] < remaining_time[target_process]:
__lowerCAmelCase = i
total_time += burst_time[target_process]
completed += 1
__lowerCAmelCase = 0
__lowerCAmelCase = (
total_time - arrival_time[target_process] - burst_time[target_process]
)
else:
total_time += 1
return waiting_time
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = [0] * no_of_processes
for i in range(_lowerCAmelCase ):
__lowerCAmelCase = burst_time[i] + waiting_time[i]
return turn_around_time
if __name__ == "__main__":
print('''[TEST CASE 01]''')
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = [2, 5, 3, 7]
SCREAMING_SNAKE_CASE_ = [0, 0, 0, 0]
SCREAMING_SNAKE_CASE_ = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
SCREAMING_SNAKE_CASE_ = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print('''PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time''')
for i, process_id in enumerate(list(range(1, 5))):
print(
F"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"
F"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"
)
print(F"\nAverage waiting time = {mean(waiting_time):.5f}")
print(F"Average turnaround time = {mean(turn_around_time):.5f}")
| 301
| 1
|
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
SCREAMING_SNAKE_CASE_ = {
'''vocab_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
},
}
SCREAMING_SNAKE_CASE_ = {
'''allenai/longformer-base-4096''': 4_096,
'''allenai/longformer-large-4096''': 4_096,
'''allenai/longformer-large-4096-finetuned-triviaqa''': 4_096,
'''allenai/longformer-base-4096-extra.pos.embd.only''': 4_096,
'''allenai/longformer-large-4096-extra.pos.embd.only''': 4_096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def lowercase ():
__lowerCAmelCase = (
list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
)
__lowerCAmelCase = bs[:]
__lowerCAmelCase = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_lowerCAmelCase )
cs.append(2**8 + n )
n += 1
__lowerCAmelCase = [chr(_lowerCAmelCase ) for n in cs]
return dict(zip(_lowerCAmelCase , _lowerCAmelCase ) )
def lowercase (_lowerCAmelCase ):
__lowerCAmelCase = set()
__lowerCAmelCase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__lowerCAmelCase = char
return pairs
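# Standalone sketch of the symbol-pair step used by the BPE loop below: a word split into
# symbols yields the set of adjacent pairs, and the merge loop repeatedly fuses the pair
# with the lowest merge rank.
if __name__ == "__main__":
    demo_word = ("h", "e", "l", "l", "o")
    demo_pairs = {(demo_word[i], demo_word[i + 1]) for i in range(len(demo_word) - 1)}
    print(sorted(demo_pairs))  # [('e', 'l'), ('h', 'e'), ('l', 'l'), ('l', 'o')]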
class lowerCAmelCase_ ( A__ ):
'''simple docstring'''
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case = ['''input_ids''', '''attention_mask''']
def __init__( self , snake_case_ , snake_case_ , snake_case_="replace" , snake_case_="<s>" , snake_case_="</s>" , snake_case_="</s>" , snake_case_="<s>" , snake_case_="<unk>" , snake_case_="<pad>" , snake_case_="<mask>" , snake_case_=False , **snake_case_ , ) -> Any:
__lowerCAmelCase = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else bos_token
__lowerCAmelCase = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else eos_token
__lowerCAmelCase = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else sep_token
__lowerCAmelCase = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else cls_token
__lowerCAmelCase = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else unk_token
__lowerCAmelCase = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else pad_token
        # Mask token behaves like a normal word, i.e. includes the space before it
__lowerCAmelCase = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else mask_token
super().__init__(
errors=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , cls_token=snake_case_ , pad_token=snake_case_ , mask_token=snake_case_ , add_prefix_space=snake_case_ , **snake_case_ , )
with open(snake_case_ , encoding="""utf-8""" ) as vocab_handle:
__lowerCAmelCase = json.load(snake_case_ )
__lowerCAmelCase = {v: k for k, v in self.encoder.items()}
__lowerCAmelCase = errors # how to handle errors in decoding
__lowerCAmelCase = bytes_to_unicode()
__lowerCAmelCase = {v: k for k, v in self.byte_encoder.items()}
with open(snake_case_ , encoding="""utf-8""" ) as merges_handle:
__lowerCAmelCase = merges_handle.read().split("""\n""" )[1:-1]
__lowerCAmelCase = [tuple(merge.split() ) for merge in bpe_merges]
__lowerCAmelCase = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )
__lowerCAmelCase = {}
__lowerCAmelCase = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__lowerCAmelCase = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
def A__ ( self ) -> str:
return len(self.encoder )
def A__ ( self ) -> List[str]:
return dict(self.encoder , **self.added_tokens_encoder )
def A__ ( self , snake_case_ ) -> Dict:
if token in self.cache:
return self.cache[token]
__lowerCAmelCase = tuple(snake_case_ )
__lowerCAmelCase = get_pairs(snake_case_ )
if not pairs:
return token
while True:
__lowerCAmelCase = min(snake_case_ , key=lambda snake_case_ : self.bpe_ranks.get(snake_case_ , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
__lowerCAmelCase , __lowerCAmelCase = bigram
__lowerCAmelCase = []
__lowerCAmelCase = 0
while i < len(snake_case_ ):
try:
__lowerCAmelCase = word.index(snake_case_ , snake_case_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__lowerCAmelCase = j
if word[i] == first and i < len(snake_case_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__lowerCAmelCase = tuple(snake_case_ )
__lowerCAmelCase = new_word
if len(snake_case_ ) == 1:
break
else:
__lowerCAmelCase = get_pairs(snake_case_ )
__lowerCAmelCase = """ """.join(snake_case_ )
__lowerCAmelCase = word
return word
def A__ ( self , snake_case_ ) -> Union[str, Any]:
__lowerCAmelCase = []
for token in re.findall(self.pat , snake_case_ ):
__lowerCAmelCase = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(snake_case_ ).split(""" """ ) )
return bpe_tokens
def A__ ( self , snake_case_ ) -> List[str]:
return self.encoder.get(snake_case_ , self.encoder.get(self.unk_token ) )
def A__ ( self , snake_case_ ) -> Optional[int]:
return self.decoder.get(snake_case_ )
def A__ ( self , snake_case_ ) -> int:
__lowerCAmelCase = """""".join(snake_case_ )
__lowerCAmelCase = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def A__ ( self , snake_case_ , snake_case_ = None ) -> Tuple[str]:
if not os.path.isdir(snake_case_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__lowerCAmelCase = os.path.join(
snake_case_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
__lowerCAmelCase = os.path.join(
snake_case_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(snake_case_ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=snake_case_ , ensure_ascii=snake_case_ ) + """\n""" )
__lowerCAmelCase = 0
with open(snake_case_ , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda snake_case_ : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
""" Please check that the tokenizer is not corrupted!""" )
__lowerCAmelCase = token_index
writer.write(""" """.join(snake_case_ ) + """\n""" )
index += 1
return vocab_file, merge_file
def A__ ( self , snake_case_ , snake_case_ = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__lowerCAmelCase = [self.cls_token_id]
__lowerCAmelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def A__ ( self , snake_case_ , snake_case_ = None , snake_case_ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case_ , token_ids_a=snake_case_ , already_has_special_tokens=snake_case_ )
if token_ids_a is None:
return [1] + ([0] * len(snake_case_ )) + [1]
return [1] + ([0] * len(snake_case_ )) + [1, 1] + ([0] * len(snake_case_ )) + [1]
def A__ ( self , snake_case_ , snake_case_ = None ) -> List[int]:
__lowerCAmelCase = [self.sep_token_id]
__lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def A__ ( self , snake_case_ , snake_case_=False , **snake_case_ ) -> List[Any]:
__lowerCAmelCase = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(snake_case_ ) > 0 and not text[0].isspace()):
__lowerCAmelCase = """ """ + text
return (text, kwargs)
| 301
|
"""simple docstring"""
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE_ = get_tests_dir('''fixtures/spiece.model''')
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( A__ , unittest.TestCase ):
'''simple docstring'''
_snake_case = DebertaVaTokenizer
_snake_case = DebertaVaTokenizerFast
_snake_case = True
_snake_case = True
def A__ ( self ) -> Optional[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , unk_token="""<unk>""" )
tokenizer.save_pretrained(self.tmpdirname )
def A__ ( self , snake_case_ ) -> List[Any]:
__lowerCAmelCase = """this is a test"""
__lowerCAmelCase = """this is a test"""
return input_text, output_text
def A__ ( self ) -> Optional[Any]:
__lowerCAmelCase = """<pad>"""
__lowerCAmelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ )
def A__ ( self ) -> Any:
__lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """[PAD]""" )
self.assertEqual(len(snake_case_ ) , 30_001 )
def A__ ( self ) -> Optional[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 30_000 )
def A__ ( self ) -> int:
# fmt: off
__lowerCAmelCase = """ \tHeLLo!how \n Are yoU? """
__lowerCAmelCase = ["""▁hello""", """!""", """how""", """▁are""", """▁you""", """?"""]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , do_lower_case=snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = DebertaVaTokenizerFast(snake_case_ , do_lower_case=snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
@unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" )
def A__ ( self ) -> int:
pass
@unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" )
def A__ ( self ) -> Dict:
pass
def A__ ( self ) -> List[str]:
# fmt: off
__lowerCAmelCase = """I was born in 92000, and this is falsé."""
__lowerCAmelCase = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = DebertaVaTokenizerFast(snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
def A__ ( self ) -> Dict:
# fmt: off
__lowerCAmelCase = """I was born in 92000, and this is falsé."""
__lowerCAmelCase = ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = DebertaVaTokenizerFast(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
def A__ ( self ) -> Any:
# fmt: off
__lowerCAmelCase = """I was born in 92000, and this is falsé."""
__lowerCAmelCase = ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """.""", ]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = DebertaVaTokenizerFast(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
def A__ ( self ) -> Tuple:
# fmt: off
__lowerCAmelCase = """I was born in 92000, and this is falsé."""
__lowerCAmelCase = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = DebertaVaTokenizerFast(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
def A__ ( self ) -> Any:
# fmt: off
__lowerCAmelCase = """ \tHeLLo!how \n Are yoU? """
__lowerCAmelCase = ["""▁""", """<unk>""", """e""", """<unk>""", """o""", """!""", """how""", """▁""", """<unk>""", """re""", """▁yo""", """<unk>""", """?"""]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = DebertaVaTokenizerFast(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
def A__ ( self ) -> int:
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = self.get_rust_tokenizer()
__lowerCAmelCase = """I was born in 92000, and this is falsé."""
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
__lowerCAmelCase = rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = self.get_rust_tokenizer()
__lowerCAmelCase = tokenizer.encode(snake_case_ )
__lowerCAmelCase = rust_tokenizer.encode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
def A__ ( self ) -> str:
__lowerCAmelCase = """This is a test"""
__lowerCAmelCase = [13, 1, 4_398, 25, 21, 1_289]
__lowerCAmelCase = ["""▁""", """T""", """his""", """▁is""", """▁a""", """▁test"""]
__lowerCAmelCase = ["""▁""", """<unk>""", """his""", """▁is""", """▁a""", """▁test"""]
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , keep_accents=snake_case_ )
__lowerCAmelCase = DebertaVaTokenizerFast(snake_case_ , keep_accents=snake_case_ )
__lowerCAmelCase = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = rust_tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
# fmt: off
__lowerCAmelCase = """I was born in 92000, and this is falsé."""
__lowerCAmelCase = [13, 1, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9]
__lowerCAmelCase = ["""▁""", """I""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """.""", ]
__lowerCAmelCase = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """.""", ]
# fmt: on
__lowerCAmelCase = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = rust_tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
def A__ ( self ) -> Optional[int]:
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ )
__lowerCAmelCase = tokenizer.encode("""sequence builders""" )
__lowerCAmelCase = tokenizer.encode("""multi-sequence build""" )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(snake_case_ )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(snake_case_ , snake_case_ )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , snake_case_ )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , snake_case_ , )
@slow
def A__ ( self ) -> int:
# fmt: off
__lowerCAmelCase = {"""input_ids""": [[1, 39_867, 36, 19_390, 486, 27, 35_052, 81_436, 18, 60_685, 1_225, 7, 35_052, 81_436, 18, 9_367, 16_899, 18, 15_937, 53, 594, 773, 18, 16_287, 30_465, 36, 15_937, 6, 41_139, 38, 36_979, 60_763, 191, 6, 34_132, 99, 6, 50_538, 390, 43_230, 6, 34_132, 2_779, 20_850, 14, 699, 1_072, 1_194, 36, 382, 10_901, 53, 7, 699, 1_072, 2_084, 36, 20_422, 630, 53, 19, 105, 3_049, 1_896, 1_053, 16_899, 1_506, 11, 37_978, 4_243, 7, 1_237, 31_869, 200, 16_566, 654, 6, 35_052, 81_436, 7, 55_630, 13_593, 4, 2], [1, 26, 15_011, 13, 667, 8, 1_053, 18, 23_611, 1_237, 72_356, 12_820, 34, 104_134, 1_209, 35, 13_313, 6_627, 21, 202, 347, 7, 164, 2_399, 11, 46, 4_485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_232, 2_864, 15_785, 14_951, 105, 5, 8_581, 1_250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case_ , model_name="""microsoft/deberta-v2-xlarge""" , revision="""ad6e42c1532ddf3a15c39246b63f5559d558b670""" , )
| 301
| 1
|
"""simple docstring"""
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
SCREAMING_SNAKE_CASE_ = TypeVar('''T''')
class lowerCAmelCase_ ( Generic[T] ):
'''simple docstring'''
def __init__( self , snake_case_ = True ) -> None:
__lowerCAmelCase = {} # dictionary of lists
__lowerCAmelCase = directed
def A__ ( self , snake_case_ , snake_case_ ) -> GraphAdjacencyList[T]:
if not self.directed: # For undirected graphs
            # if both source vertex and destination vertex are present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(snake_case_ )
self.adj_list[destination_vertex].append(snake_case_ )
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
            # as its first adjacent vertex.
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(snake_case_ )
__lowerCAmelCase = [source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as its first adjacent vertex.
elif destination_vertex in self.adj_list:
self.adj_list[destination_vertex].append(snake_case_ )
__lowerCAmelCase = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as its first adjacent vertex. Also
# create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as its first adjacent vertex.
else:
__lowerCAmelCase = [destination_vertex]
__lowerCAmelCase = [source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(snake_case_ )
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(snake_case_ )
__lowerCAmelCase = []
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
elif destination_vertex in self.adj_list:
__lowerCAmelCase = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
            # destination vertex as its first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex
else:
__lowerCAmelCase = [destination_vertex]
__lowerCAmelCase = []
return self
def __repr__( self ) -> str:
return pformat(self.adj_list )
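# Worked example of the bookkeeping described in the comments above: for an *undirected*
# graph, adding the edges (1, 2), (2, 3) and (1, 3) in that order yields
#     {1: [2, 3], 2: [1, 3], 3: [2, 1]}
# because each new edge appends every endpoint to the other endpoint's adjacency list,
# creating the key first when it is not present yet.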
| 301
|
"""simple docstring"""
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
SCREAMING_SNAKE_CASE_ = open # noqa: we just need to have a builtin inside this module to test it properly
| 301
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'''MIT/ast-finetuned-audioset-10-10-0.4593''': (
'''https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'''
),
}
class lowerCAmelCase_ ( A__ ):
'''simple docstring'''
_snake_case = '''audio-spectrogram-transformer'''
def __init__( self , snake_case_=768 , snake_case_=12 , snake_case_=12 , snake_case_=3_072 , snake_case_="gelu" , snake_case_=0.0 , snake_case_=0.0 , snake_case_=0.02 , snake_case_=1e-1_2 , snake_case_=16 , snake_case_=True , snake_case_=10 , snake_case_=10 , snake_case_=1_024 , snake_case_=128 , **snake_case_ , ) -> List[str]:
super().__init__(**snake_case_ )
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = initializer_range
__lowerCAmelCase = layer_norm_eps
__lowerCAmelCase = patch_size
__lowerCAmelCase = qkv_bias
__lowerCAmelCase = frequency_stride
__lowerCAmelCase = time_stride
__lowerCAmelCase = max_length
__lowerCAmelCase = num_mel_bins
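# Illustrative sketch (assumption: the class above mirrors the upstream
# `transformers.ASTConfig`). The frequency/time strides control how densely 16x16 patches
# are sampled from the (num_mel_bins x max_length) spectrogram.
if __name__ == "__main__":
    from transformers import ASTConfig

    cfg = ASTConfig(frequency_stride=10, time_stride=10)
    print(cfg.hidden_size, cfg.num_mel_bins, cfg.max_length)  # 768 128 1024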
| 301
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
SCREAMING_SNAKE_CASE_ = {
'''vocab_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-openqa''': (
            '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt'''
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
            '''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
            '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-openqa''': (
'''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-reader''': (
'''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-openqa''': (
'''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-reader''': (
'''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'''
),
},
}
SCREAMING_SNAKE_CASE_ = {
'''google/realm-cc-news-pretrained-embedder''': 512,
'''google/realm-cc-news-pretrained-encoder''': 512,
'''google/realm-cc-news-pretrained-scorer''': 512,
'''google/realm-cc-news-pretrained-openqa''': 512,
'''google/realm-orqa-nq-openqa''': 512,
'''google/realm-orqa-nq-reader''': 512,
'''google/realm-orqa-wq-openqa''': 512,
'''google/realm-orqa-wq-reader''': 512,
}
SCREAMING_SNAKE_CASE_ = {
'''google/realm-cc-news-pretrained-embedder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-encoder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-scorer''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-reader''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-reader''': {'''do_lower_case''': True},
}
class lowerCAmelCase_ ( A__ ):
'''simple docstring'''
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_INIT_CONFIGURATION
_snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case = RealmTokenizer
def __init__( self , snake_case_=None , snake_case_=None , snake_case_=True , snake_case_="[UNK]" , snake_case_="[SEP]" , snake_case_="[PAD]" , snake_case_="[CLS]" , snake_case_="[MASK]" , snake_case_=True , snake_case_=None , **snake_case_ , ) -> Optional[int]:
super().__init__(
snake_case_ , tokenizer_file=snake_case_ , do_lower_case=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , pad_token=snake_case_ , cls_token=snake_case_ , mask_token=snake_case_ , tokenize_chinese_chars=snake_case_ , strip_accents=snake_case_ , **snake_case_ , )
__lowerCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , snake_case_ ) != do_lower_case
or normalizer_state.get("""strip_accents""" , snake_case_ ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , snake_case_ ) != tokenize_chinese_chars
):
__lowerCAmelCase = getattr(snake_case_ , normalizer_state.pop("""type""" ) )
__lowerCAmelCase = do_lower_case
__lowerCAmelCase = strip_accents
__lowerCAmelCase = tokenize_chinese_chars
__lowerCAmelCase = normalizer_class(**snake_case_ )
__lowerCAmelCase = do_lower_case
def A__ ( self , snake_case_ , **snake_case_ ) -> Tuple:
__lowerCAmelCase = PaddingStrategy.MAX_LENGTH
__lowerCAmelCase = text
__lowerCAmelCase = kwargs.pop("""text_pair""" , snake_case_ )
__lowerCAmelCase = kwargs.pop("""return_tensors""" , snake_case_ )
__lowerCAmelCase = {
"""input_ids""": [],
"""attention_mask""": [],
"""token_type_ids""": [],
}
for idx, candidate_text in enumerate(snake_case_ ):
if batch_text_pair is not None:
__lowerCAmelCase = batch_text_pair[idx]
else:
__lowerCAmelCase = None
__lowerCAmelCase = super().__call__(snake_case_ , snake_case_ , return_tensors=snake_case_ , **snake_case_ )
__lowerCAmelCase = encoded_candidates.get("""input_ids""" )
__lowerCAmelCase = encoded_candidates.get("""attention_mask""" )
__lowerCAmelCase = encoded_candidates.get("""token_type_ids""" )
if encoded_input_ids is not None:
output_data["input_ids"].append(snake_case_ )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(snake_case_ )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(snake_case_ )
__lowerCAmelCase = {key: item for key, item in output_data.items() if len(snake_case_ ) != 0}
return BatchEncoding(snake_case_ , tensor_type=snake_case_ )
def A__ ( self , snake_case_ , snake_case_=None ) -> Optional[int]:
__lowerCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def A__ ( self , snake_case_ , snake_case_ = None ) -> List[int]:
__lowerCAmelCase = [self.sep_token_id]
__lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def A__ ( self , snake_case_ , snake_case_ = None ) -> Tuple[str]:
__lowerCAmelCase = self._tokenizer.model.save(snake_case_ , name=snake_case_ )
return tuple(snake_case_ )
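# Illustrative sketch (assumption: the class above mirrors the upstream
# `transformers.RealmTokenizerFast`, whose candidate-encoding method is named
# `batch_encode_candidates`). Each example holds several candidate texts, and every
# candidate is padded to `max_length` so the batch can be stacked into one tensor.
if __name__ == "__main__":
    from transformers import RealmTokenizerFast

    tok = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
    candidates = [["Hello world!", "Nice to meet you!"], ["The cute cat.", "The adorable dog."]]
    batch = tok.batch_encode_candidates(candidates, max_length=10, return_tensors="pt")
    print(batch["input_ids"].shape)  # (2 examples, 2 candidates, 10 tokens)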
| 301
| 1
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
SCREAMING_SNAKE_CASE_ = R'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
        Retrieval batch size, defined as the number of queries issued concurrently to the faiss index encapsulated
        by [`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
    dataset_split (`str`, *optional*, defaults to `"train"`):
Which split of the `dataset` to load.
    index_name (`str`, *optional*, defaults to `"compressed"`):
The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
`"compressed"`.
    index_path (`str`, *optional*):
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
    use_dummy_dataset (`bool`, *optional*, defaults to `False`):
Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''
@add_start_docstrings(A__ )
class lowerCAmelCase_ ( A__ ):
'''simple docstring'''
_snake_case = '''rag'''
_snake_case = True
def __init__( self , snake_case_=None , snake_case_=True , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=" / " , snake_case_=" // " , snake_case_=5 , snake_case_=300 , snake_case_=768 , snake_case_=8 , snake_case_="wiki_dpr" , snake_case_="train" , snake_case_="compressed" , snake_case_=None , snake_case_=None , snake_case_=False , snake_case_=False , snake_case_=0.0 , snake_case_=True , snake_case_=False , snake_case_=False , snake_case_=False , snake_case_=True , snake_case_=None , **snake_case_ , ) -> List[Any]:
super().__init__(
bos_token_id=snake_case_ , pad_token_id=snake_case_ , eos_token_id=snake_case_ , decoder_start_token_id=snake_case_ , forced_eos_token_id=snake_case_ , is_encoder_decoder=snake_case_ , prefix=snake_case_ , vocab_size=snake_case_ , **snake_case_ , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
__lowerCAmelCase = kwargs.pop("""question_encoder""" )
__lowerCAmelCase = question_encoder_config.pop("""model_type""" )
__lowerCAmelCase = kwargs.pop("""generator""" )
__lowerCAmelCase = decoder_config.pop("""model_type""" )
from ..auto.configuration_auto import AutoConfig
__lowerCAmelCase = AutoConfig.for_model(snake_case_ , **snake_case_ )
__lowerCAmelCase = AutoConfig.for_model(snake_case_ , **snake_case_ )
__lowerCAmelCase = reduce_loss
__lowerCAmelCase = label_smoothing
__lowerCAmelCase = exclude_bos_score
__lowerCAmelCase = do_marginalize
__lowerCAmelCase = title_sep
__lowerCAmelCase = doc_sep
__lowerCAmelCase = n_docs
__lowerCAmelCase = max_combined_length
__lowerCAmelCase = dataset
__lowerCAmelCase = dataset_split
__lowerCAmelCase = index_name
__lowerCAmelCase = retrieval_vector_size
__lowerCAmelCase = retrieval_batch_size
__lowerCAmelCase = passages_path
__lowerCAmelCase = index_path
__lowerCAmelCase = use_dummy_dataset
__lowerCAmelCase = output_retrieved
__lowerCAmelCase = do_deduplication
__lowerCAmelCase = use_cache
if self.forced_eos_token_id is None:
__lowerCAmelCase = getattr(self.generator , """forced_eos_token_id""" , snake_case_ )
@classmethod
def A__ ( cls , snake_case_ , snake_case_ , **snake_case_ ) -> PretrainedConfig:
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **snake_case_ )
def A__ ( self ) -> Dict:
__lowerCAmelCase = copy.deepcopy(self.__dict__ )
__lowerCAmelCase = self.question_encoder.to_dict()
__lowerCAmelCase = self.generator.to_dict()
__lowerCAmelCase = self.__class__.model_type
return output
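# Illustrative sketch (assumption: the class above mirrors the upstream
# `transformers.RagConfig`). A RAG configuration is composed from a question-encoder
# config and a generator config, mirroring the classmethod defined above.
if __name__ == "__main__":
    from transformers import AutoConfig, RagConfig

    question_encoder_cfg = AutoConfig.for_model("dpr")
    generator_cfg = AutoConfig.for_model("bart")
    rag_cfg = RagConfig.from_question_encoder_generator_configs(
        question_encoder_cfg, generator_cfg, n_docs=5
    )
    print(rag_cfg.n_docs, rag_cfg.index_name)  # 5 compressed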
| 301
|
"""simple docstring"""
import math
def lowercase (_lowerCAmelCase ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All prime numbers greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(_lowerCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def lowercase (_lowerCAmelCase = 0.1 ):
__lowerCAmelCase = 3
__lowerCAmelCase = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
primes += is_prime(_lowerCAmelCase )
j += 2
return j
if __name__ == "__main__":
import doctest
doctest.testmod()
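
# A quick sanity check of the 6k +/- 1 trial division above against a naive primality test
# (the naive check is an assumption added only for comparison on small inputs).
def _naive_is_prime(n):
    return n > 1 and all(n % d != 0 for d in range(2, n))


assert all(is_prime(n) == _naive_is_prime(n) for n in range(2, 200))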
"""simple docstring"""
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow('''''', '''|''', '''|'''),
datarow=DataRow('''''', '''|''', '''|'''),
padding=1,
with_header_hide=None,
)
failed = []
group_info = []
no_error_payload = {'''type''': '''section''', '''text''': {'''type''': '''plain_text''', '''text''': '''No failed tests! 🤗''', '''emoji''': True}}
payload = [
{
'''type''': '''header''',
'''text''': {
'''type''': '''plain_text''',
'''text''': F"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results",
'''emoji''': True,
},
}
]
total_num_failed = 0
for log in Path().glob('''*.log'''):
    section_num_failed = 0
with open(log, '''r''') as f:
for line in f:
            line = json.loads(line)
            if line.get('''nodeid''', '''''') != "":
                test = line['''nodeid''']
                if line.get('''duration''', None) is not None:
                    duration = F"{line['duration']:.4f}"
if line.get('''outcome''', '''''') == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split('''_''')[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()
message = ''''''
all_filesafailed = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            filesafailed = {}
            for test in failed_tests:
                data = test[0].split('''::''')
                data[0] = data[0].split('''/''')[-1]
                if data[0] not in filesafailed:
                    filesafailed[data[0]] = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of instances in failed_tests
            table = []
            for file in individual_files:
                table.append([file, len(filesafailed[file])])
            failed_table = tabulate(
table,
headers=['''Test Location''', '''Num Failed'''],
tablefmt=hf_table_format,
stralign='''right''',
)
message += F"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3_000:
        err = '''Too many failed tests, please see the full report in the Action results.'''
        offset = len(err) + 10
        message = message[: 3_000 - offset] + F"\n...\n```\n{err}"
print(F"### {message}")
else:
    message = '''No failed tests! 🤗'''
print(F"## {message}")
payload.append(no_error_payload)
if os.environ.get('''TEST_TYPE''', '''''') != "":
from slack_sdk import WebClient
    client = WebClient(token=os.environ['''SLACK_API_TOKEN'''])
if message != "No failed tests! 🤗":
        md_report = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': message,
},
}
payload.append(md_report)
        action_button = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': '''*For more details:*''',
},
'''accessory''': {
'''type''': '''button''',
'''text''': {
'''type''': '''plain_text''',
'''text''': '''Check Action results''',
'''emoji''': True,
},
'''url''': F"https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
payload.append(action_button)
        date_report = {
'''type''': '''context''',
'''elements''': [
{
'''type''': '''plain_text''',
'''text''': F"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}",
}
],
}
payload.append(date_report)
        response = client.chat_postMessage(channel='''#accelerate-ci-daily''', text=message, blocks=payload)
        ts = response.data['''ts''']
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
                test_class = ''''''
for i, row in enumerate(test_failures):
if row[0] != test_class:
                        test_class = row[0]
                    else:
                        row[0] = ''''''
                payload = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': F"Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```",
},
}
client.chat_postMessage(
channel='''#accelerate-ci-daily''',
thread_ts=ts,
blocks=[payload],
)
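
# A minimal sketch of what the Slack-friendly `hf_table_format` defined at the top of this
# script renders for a small made-up table (assumes `tabulate` is installed; file names invented).
if __name__ == "__main__":
    sample_rows = [["test_big_modeling.py", 2], ["test_utils.py", 1]]
    print(tabulate(sample_rows, headers=["Test Location", "Num Failed"], tablefmt=hf_table_format, stralign="right"))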
"""simple docstring"""
import os
from distutils.util import strtobool
def get_int_from_env(env_keys, default):
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
    return value
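
# A minimal sketch of the flag-parsing pattern above on a hypothetical environment variable
# (the variable name is an assumption, used only for illustration).
if __name__ == "__main__":
    os.environ["ACCELERATE_DEBUG_MODE"] = "yes"
    value = os.environ.get("ACCELERATE_DEBUG_MODE", str(False))
    print(strtobool(value) == 1)  # strtobool("yes") == 1, so this prints True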
"""simple docstring"""
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_attributes.py
PATH_TO_TRANSFORMERS = '''src/transformers'''
# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
SPECIAL_CASES_TO_ALLOW = {
# used to compute the property `self.chunk_length`
'''EncodecConfig''': ['''overlap'''],
# used as `self.bert_model = BertModel(config, ...)`
'''DPRConfig''': True,
    # not used in modeling files, but it's important information
'''FSMTConfig''': ['''langs'''],
# used internally in the configuration class file
'''GPTNeoConfig''': ['''attention_types'''],
# used internally in the configuration class file
'''EsmConfig''': ['''is_folding_model'''],
    # used during training (despite the fact that we don't have a training script for these models yet)
    '''Mask2FormerConfig''': ['''ignore_value'''],
    # `ignore_value` used during training (despite the fact that we don't have a training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
'''OneFormerConfig''': ['''ignore_value''', '''norm'''],
# used during preprocessing and collation, see `collating_graphormer.py`
'''GraphormerConfig''': ['''spatial_pos_max'''],
# used internally in the configuration class file
'''T5Config''': ['''feed_forward_proj'''],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
'''MT5Config''': ['''feed_forward_proj''', '''tokenizer_class'''],
'''UMT5Config''': ['''feed_forward_proj''', '''tokenizer_class'''],
# used internally in the configuration class file
'''LongT5Config''': ['''feed_forward_proj'''],
# used internally in the configuration class file
'''SwitchTransformersConfig''': ['''feed_forward_proj'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''BioGptConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''GLPNConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''SegformerConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''CvtConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''PerceiverConfig''': ['''layer_norm_eps'''],
# used internally to calculate the feature size
'''InformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate the feature size
'''TimeSeriesTransformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate the feature size
'''AutoformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate `mlp_dim`
'''SamVisionConfig''': ['''mlp_ratio'''],
# For (head) training, but so far not implemented
'''ClapAudioConfig''': ['''num_classes'''],
# Not used, but providing useful information to users
'''SpeechT5HifiGanConfig''': ['''sampling_rate'''],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
'''CLIPSegConfig''': True,
'''DeformableDetrConfig''': True,
'''DetaConfig''': True,
'''DinatConfig''': True,
'''DonutSwinConfig''': True,
'''EfficientFormerConfig''': True,
'''FSMTConfig''': True,
'''JukeboxConfig''': True,
'''LayoutLMv2Config''': True,
'''MaskFormerSwinConfig''': True,
'''MT5Config''': True,
'''NatConfig''': True,
'''OneFormerConfig''': True,
'''PerceiverConfig''': True,
'''RagConfig''': True,
'''SpeechT5Config''': True,
'''SwinConfig''': True,
'''Swin2SRConfig''': True,
'''Swinv2Config''': True,
'''SwitchTransformersConfig''': True,
'''TableTransformerConfig''': True,
'''TapasConfig''': True,
'''TransfoXLConfig''': True,
'''UniSpeechConfig''': True,
'''UniSpeechSatConfig''': True,
'''WavLMConfig''': True,
'''WhisperConfig''': True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
'''JukeboxPriorConfig''': True,
# TODO: @Younes (for `is_decoder`)
'''Pix2StructTextConfig''': True,
}
)
def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    attribute_used = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
f"""config.{attribute}""" in modeling_source
or f"""getattr(config, \"{attribute}\"""" in modeling_source
or f"""getattr(self.config, \"{attribute}\"""" in modeling_source
):
                attribute_used = True
# Deal with multi-line cases
elif (
re.search(
rf"""getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\"""" , _lowerCAmelCase , )
is not None
):
                attribute_used = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
                    attribute_used = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
"""bos_index""",
"""eos_index""",
"""pad_index""",
"""unk_index""",
"""mask_index""",
"""image_size""",
"""use_cache""",
"""out_features""",
"""out_indices""",
]
__lowerCAmelCase = ["""encoder_no_repeat_ngram_size"""]
# Special cases to be allowed
__lowerCAmelCase = True
if not attribute_used:
__lowerCAmelCase = False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
__lowerCAmelCase = True
elif attribute in ["tie_word_embeddings"] and default_value is False:
__lowerCAmelCase = True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
__lowerCAmelCase = True
elif attribute.endswith("""_token_id""" ):
__lowerCAmelCase = True
# configuration class specific cases
if not case_allowed:
__lowerCAmelCase = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
__lowerCAmelCase = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
def check_config_attributes_being_used(config_class):
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]
    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}
    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]
    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())
    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])
        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])
    return sorted(unused_attributes)
def check_config_attributes():
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes
    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"
        raise ValueError(error)
if __name__ == "__main__":
check_config_attributes()
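
# A minimal sketch of the multi-line `getattr` regex used in check_attribute_being_used above,
# applied to a made-up source snippet (both the attribute and the snippet are assumptions).
if __name__ == "__main__":
    example_attribute = "hidden_size"
    example_source = 'x = getattr(\n    self.config, "hidden_size", 128)'
    pattern = rf"getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{example_attribute}\""
    print(re.search(pattern, example_source) is not None)  # the call split across lines is still detected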
"""simple docstring"""
def solution(limit=1_000_000):
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])
if __name__ == "__main__":
print(solution())
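
# A minimal sketch of the same totient sieve on a tiny limit, to show how phi[] is filled in
# (the limit of 10 is an arbitrary illustration).
if __name__ == "__main__":
    small_limit = 10
    phi = [i - 1 for i in range(small_limit + 1)]
    for i in range(2, small_limit + 1):
        if phi[i] == i - 1:  # i is prime: no smaller prime factor has reduced phi[i] yet
            for j in range(2 * i, small_limit + 1, i):
                phi[j] -= phi[j] // i
    print(phi[2:])  # Euler totients of 2..10: [1, 2, 2, 4, 2, 6, 4, 6, 4]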
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class lowerCAmelCase_ ( datasets.BuilderConfig ):
'''simple docstring'''
    batch_size: int = 10_000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None
class lowerCAmelCase_ ( datasets.ArrowBasedBuilder ):
'''simple docstring'''
    BUILDER_CONFIG_CLASS = ParquetConfig

    def _info(self) -> datasets.DatasetInfo:
return datasets.DatasetInfo(features=self.config.features )
    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"
                )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)
                    ):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                    raise
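
# A minimal sketch of the batched read pattern used by _generate_tables above, on a tiny parquet
# file written to a temporary directory (assumes pyarrow is installed; the file contents are invented).
if __name__ == "__main__":
    import os
    import tempfile

    with tempfile.TemporaryDirectory() as tmp_dir:
        path = os.path.join(tmp_dir, "tiny.parquet")
        pq.write_table(pa.table({"id": [1, 2, 3, 4], "text": ["a", "b", "c", "d"]}), path)
        parquet_file = pq.ParquetFile(path)
        for batch_idx, record_batch in enumerate(parquet_file.iter_batches(batch_size=2, columns=["text"])):
            print(batch_idx, pa.Table.from_batches([record_batch]).to_pydict())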
"""simple docstring"""
from math import isqrt, log2


def calculate_prime_numbers(max_number):
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base=800800, degree=800800):
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)
    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1
    return hybrid_integers_count
if __name__ == "__main__":
print(F"{solution() = }")
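
# A minimal numeric sketch of the log-space comparison used above: p**q * q**p <= C**D exactly when
# q*log2(p) + p*log2(q) <= D*log2(C), up to floating-point error (the small values are arbitrary).
if __name__ == "__main__":
    p, q, C, D = 2, 3, 9, 2
    print((p**q) * (q**p) <= C**D)                    # 72 <= 81 -> True
    print(q * log2(p) + p * log2(q) <= D * log2(C))   # same comparison in log space -> True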