"""Benchmark a brute-force triplet-sum search against a two-pointer solution."""
from __future__ import annotations

from itertools import permutations
from random import randint
from timeit import repeat


def make_dataset() -> tuple[list[int], int]:
    """Build a random array of 10 ints and a random target sum."""
    arr = [randint(-1000, 1000) for _ in range(10)]
    target = randint(-5000, 5000)
    return (arr, target)


dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    """Naive O(n^3) search over all 3-permutations of the array.

    Returns a sorted triplet whose sum equals target, else (0, 0, 0).
    """
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    """Sort once, then use the two-pointer technique: O(n^2) overall.

    Returns a triplet whose sum equals target, else (0, 0, 0).
    """
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)


def solution_times() -> tuple[float, float]:
    """Time both implementations on the shared dataset with timeit.repeat."""
    setup_code = "\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n"
    test_code1 = "\ntriplet_sum1(*dataset)\n"
    test_code2 = "\ntriplet_sum2(*dataset)\n"
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10000)
    return (min(times1), min(times2))


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    times = solution_times()
    print(f"The time for naive implementation is {times[0]}.")
    print(f"The time for optimized implementation is {times[1]}.")
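# Sanity check (editor's sketch, not in the original file): both
# implementations should find *a* triplet with the requested sum; the example
# array and target below are illustrative values, not data from the source.
if __name__ == "__main__":
    example_arr, example_target = [1, 5, 8, 2, 7, 10, 5, 3], 20
    for solver in (triplet_sum1, triplet_sum2):
        # the two solvers may return different (but equally valid) triplets
        assert sum(solver(list(example_arr), example_target)) == example_target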
"""Tests for optimizers and learning-rate schedules in transformers."""
import os
import tempfile
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        Adafactor,
        AdamW,
        get_constant_schedule,
        get_constant_schedule_with_warmup,
        get_cosine_schedule_with_warmup,
        get_cosine_with_hard_restarts_schedule_with_warmup,
        get_inverse_sqrt_schedule,
        get_linear_schedule_with_warmup,
        get_polynomial_decay_schedule_with_warmup,
    )


def unwrap_schedule(scheduler, num_steps=10):
    """Step the scheduler num_steps times and record the learning rate at each step."""
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs


def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    """Like unwrap_schedule, but round-trip the scheduler state through disk halfway."""
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs


@require_torch
class OptimizationTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)

    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w],
            lr=1e-2,
            eps=(1e-30, 1e-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            beta1=None,
            weight_decay=0.0,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)


@require_torch
class ScheduleInitTest(unittest.TestCase):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)

    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
            get_constant_schedule: ({}, [10.0] * self.num_steps),
            get_constant_schedule_with_warmup: (
                {"num_warmup_steps": 4},
                [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
            ),
            get_linear_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
            ),
            get_cosine_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
            ),
            get_cosine_with_hard_restarts_schedule_with_warmup: (
                {**common_kwargs, "num_cycles": 2},
                [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
            ),
            get_polynomial_decay_schedule_with_warmup: (
                {**common_kwargs, "power": 2.0, "lr_end": 1e-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
            ),
            get_inverse_sqrt_schedule: (
                {"num_warmup_steps": 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
            ),
        }

        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1,
                expected_learning_rates,
                tol=1e-2,
                msg=f"failed for {scheduler_func} in normal scheduler",
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")


class LambdaScheduleWrapper:
    """Picklable callable wrapper for a LambdaLR lr lambda."""

    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
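# Usage sketch (editor's addition, not part of the test file): how the helper
# above exercises a schedule outside of unittest. Values are illustrative.
if __name__ == "__main__" and is_torch_available():
    _model = nn.Linear(50, 50)
    _optimizer = AdamW(_model.parameters(), lr=10.0)
    _scheduler = get_linear_schedule_with_warmup(_optimizer, num_warmup_steps=2, num_training_steps=10)
    _lrs = unwrap_schedule(_scheduler, num_steps=10)
    # the LR ramps 0 -> 10.0 over the 2 warmup steps, then decays linearly
    assert len(_lrs) == 10 and abs(_lrs[2] - 10.0) < 1e-6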
"""Print a pretty diamond (pyramid plus inverted pyramid) pattern of stars."""


# Function to print upper half of diamond (pyramid)
def floyd(n):
    """Print n rows of stars, growing from 1 to n stars per row."""
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n):
    """Print n rows of stars, shrinking from n to 1 stars per row."""
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n):
    """Print the full diamond, or a friendly message for n <= 0."""
    if n <= 0:
        print(" ... .... nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(R"| /\ | |- | |- |--| |\ /| |-")
    print(R"|/ \| |- |_ |_ |__| | \/ | |_")
    K = 1
    while K:
        user_number = int(input("enter the number and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))
    print("Good Bye...")
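# Example (editor's addition): pretty_print(3) produces the diamond below,
# the upper pyramid from floyd() and the lower one from reverse_floyd():
#
#   *
#  * *
# * * *
# * * *
#  * *
#   *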
"""Flax mT5 model: thin wrappers around the Flax T5 classes with an mT5 config."""
import jax.numpy as jnp

from ...utils import logging
from ..t5.modeling_flax_t5 import FlaxT5EncoderModel, FlaxT5ForConditionalGeneration, FlaxT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
    """Shift input ids one token to the right."""
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)

    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids


class FlaxMT5Model(FlaxT5Model):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5EncoderModel(FlaxT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5ForConditionalGeneration(FlaxT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config
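# Worked example (editor's addition): shifting [[5, 6, 7]] right with
# decoder_start_token_id=0 gives an array equal to [[0, 5, 6]]; any -100 label
# sentinel in the shifted ids would then be replaced by pad_token_id.
#
# >>> shift_tokens_right(jnp.array([[5, 6, 7]]), pad_token_id=1, decoder_start_token_id=0)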
"""Stable Diffusion inference on CPU, optimized with Intel Extension for PyTorch (IPEX)."""
import argparse

import intel_extension_for_pytorch as ipex
import torch

from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline


parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex; the sample input lets IPEX trace realistic UNet shapes
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
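# Invocation sketch (editor's note; the script name below is hypothetical --
# use whatever this file is saved as):
#
#   python stable_diffusion_ipex.py --dpm --steps 20
#
# --dpm swaps in DPMSolverMultistepScheduler; --steps overrides the number of
# inference steps passed to the pipeline call.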
"""Word Error Rate (WER) metric."""
from jiwer import compute_measures

import datasets


_CITATION = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"

_DESCRIPTION = "\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n"

_KWARGS_DESCRIPTION = "\nCompute WER score of transcribed segments against references.\n\nArgs:\n    references: List of references for each speech input.\n    predictions: List of transcriptions to score.\n    concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n    (float): the word error rate\n\nExamples:\n\n    >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n    >>> references = [\"this is the reference\", \"there is another one\"]\n    >>> wer = datasets.load_metric(\"wer\")\n    >>> wer_score = wer.compute(predictions=predictions, references=references)\n    >>> print(wer_score)\n    0.5\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
            ],
        )

    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]

            return incorrect / total
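# Worked example (editor's addition): one substituted word in a four-word
# reference gives WER = 1/4, matching jiwer's compute_measures directly:
#
# >>> compute_measures("this is the reference", "this is the prediction")["wer"]
# 0.25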
"""
Pure-Python implementation of the MD5 hash algorithm (RFC 1321), operating on a
bit string for clarity. For real applications use hashlib, which is both safer
and far faster.
"""
from collections.abc import Generator
from math import sin


def to_little_endian(string_32: bytes) -> bytes:
    """Convert a 32-char bit string to little-endian order (8-char groups reversed)."""
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    """Format a non-negative int as its little-endian 8-char hex representation."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    """Convert the message to a bit string and pad it to a multiple of 512 bits."""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """Split the bit string into 512-bit blocks of sixteen 32-bit little-endian words."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    """Bitwise NOT restricted to 32 bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    """Addition modulo 2**32."""
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    """Rotate the 32-bit value i left by shift bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32


def md5_me(message: bytes) -> bytes:
    """Return the 32-character hex MD5 digest of the message as bytes."""
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)     # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)     # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest


if __name__ == "__main__":
    import doctest

    doctest.testmod()
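# Cross-check (editor's addition, not in the original file): a correct MD5
# implementation must agree with hashlib on every input.
if __name__ == "__main__":
    import hashlib

    for message in (b"", b"The quick brown fox jumps over the lazy dog"):
        assert md5_me(message) == hashlib.md5(message).hexdigest().encode("utf-8")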
"""
Project Euler Problem 47: https://projecteuler.net/problem=47

Find the first four consecutive integers to have four distinct prime factors each.
"""
from functools import lru_cache


def unique_prime_factors(n: int) -> set:
    """Find the unique prime factors of an integer by trial division."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoize the number of unique prime factors for a given value."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """Check the equality of ALL elements in an iterable."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Find the first group of n consecutive integers with n distinct prime factors each."""
    base = 2

    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]

        # Run elements through our unique_prime_factors function.
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)

        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group

        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int | None:
    """Return the first member of the first qualifying group."""
    results = run(n)
    return results[0] if len(results) else None


if __name__ == "__main__":
    print(solution())
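# Worked example (editor's addition): the first run of three consecutive
# integers with three distinct prime factors each is 644 = 2^2 * 7 * 23,
# 645 = 3 * 5 * 43 and 646 = 2 * 17 * 19, so solution(3) returns 644.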
"""Utilities for distributing model layers across devices (naive model parallelism)."""
from math import ceil


def assert_device_map(device_map, num_blocks):
    """Validate that a device_map covers every attention block exactly once."""
    blocks = list(range(0, num_blocks))

    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks)
        )
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks)
        )
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks)
        )


def get_device_map(n_layers, devices):
    """Return a dictionary of layers distributed evenly across all devices."""
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]

    return dict(zip(devices, layers_list))
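# Worked example (editor's addition): 12 layers spread over 2 devices gives 6
# contiguous blocks per device.
#
# >>> get_device_map(n_layers=12, devices=[0, 1])
# {0: [0, 1, 2, 3, 4, 5], 1: [6, 7, 8, 9, 10, 11]}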
"""Import structure for the Encodec model (lazily loaded)."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_encodec": [
        "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EncodecConfig",
    ],
    "feature_extraction_encodec": ["EncodecFeatureExtractor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encodec"] = [
        "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EncodecModel",
        "EncodecPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_encodec import (
        ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EncodecConfig,
    )
    from .feature_extraction_encodec import EncodecFeatureExtractor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encodec import (
            ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
            EncodecModel,
            EncodecPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""Feature extractor class for TVLT."""
from math import ceil
from typing import List, Optional, Union

import numpy as np

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class TvltFeatureExtractor(SequenceFeatureExtractor):
    """Extracts padded log-mel spectrogram patches and an attention mask from raw audio."""

    model_input_names = ["audio_values", "audio_mask"]

    def __init__(
        self,
        spectrogram_length=2048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44100,
        hop_length_to_sampling_rate=86,
        n_fft=2048,
        padding_value=0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            **kwargs,
        )

        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22050.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        ).T

    def _np_extract_fbank_features(self, waveform):
        """Compute a log-mel spectrogram and rescale it into [-1, 1]."""
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel="dB",
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = True,
        sampling_rate: Optional[int] = None,
        resample: bool = False,
        mask_audio: bool = False,
        **kwargs,
    ) -> BatchFeature:
        """Featurize one or more raw waveforms into padded spectrogram patches."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length]
            for waveform in raw_speech
        ]
        if isinstance(audio_features[0], List):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
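# Usage sketch (editor's addition; output shapes depend on the defaults above
# and are not verified against the original test suite):
#
# >>> import numpy as np
# >>> extractor = TvltFeatureExtractor()
# >>> out = extractor(np.random.randn(44100), sampling_rate=44100, return_tensors="np")
# >>> sorted(out.keys())
# ['audio_mask', 'audio_values']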
# Lint as: python3
"""Utilities for generating dataset file names and splits."""
import itertools
import os
import re


_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")

_split_re = r"^\w+(\.\w+)*$"

INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"


def camelcase_to_snakecase(name):
    """Convert camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    """Convert snake-case string to camel-case string."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
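# Worked examples (editor's addition):
#
# >>> camelcase_to_snakecase("SomeDatasetName")
# 'some_dataset_name'
# >>> filename_prefix_for_split("SomeDatasetName", "train")
# 'some_dataset_name-train'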
"""Tests for the AudioLDM text-to-audio pipeline."""
import gc
import unittest

import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
    ClapTextConfig,
    ClapTextModelWithProjection,
    RobertaTokenizer,
    SpeechT5HifiGan,
    SpeechT5HifiGanConfig,
)

from diffusers import (
    AudioLDMPipeline,
    AutoencoderKL,
    DDIMScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "num_waveforms_per_prompt",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=(32, 64),
            class_embed_type="simple_projection",
            projection_class_embeddings_input_dim=32,
            class_embeddings_concat=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=1,
            out_channels=1,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = ClapTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            projection_dim=32,
        )
        text_encoder = ClapTextModelWithProjection(text_encoder_config)
        tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77)

        vocoder_config = SpeechT5HifiGanConfig(
            model_in_dim=8,
            sampling_rate=16000,
            upsample_initial_channel=16,
            upsample_rates=[2, 2],
            upsample_kernel_sizes=[4, 4],
            resblock_kernel_sizes=[3, 7],
            resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]],
            normalize_before=False,
        )
        vocoder = SpeechT5HifiGan(vocoder_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "vocoder": vocoder,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
        }
        return inputs

    def test_audioldm_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(**inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033]
        )

        assert np.abs(audio_slice - expected_slice).max() < 1e-2

    def test_audioldm_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = audioldm_pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=audioldm_pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = audioldm_pipe.text_encoder(text_inputs)
        prompt_embeds = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        prompt_embeds = F.normalize(prompt_embeds, dim=-1)

        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2

    def test_audioldm_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = audioldm_pipe.tokenizer(
                p,
                padding="max_length",
                max_length=audioldm_pipe.tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )
            text_inputs = text_inputs["input_ids"].to(torch_device)

            text_embeds = audioldm_pipe.text_encoder(text_inputs)
            text_embeds = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            text_embeds = F.normalize(text_embeds, dim=-1)

            embeds.append(text_embeds)

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2

    def test_audioldm_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "egg cracking"
        output = audioldm_pipe(**inputs, negative_prompt=negative_prompt)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032]
        )

        assert np.abs(audio_slice - expected_slice).max() < 1e-2

    def test_audioldm_num_waveforms_per_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = "A hammer hitting a wooden surface"

        # test num_waveforms_per_prompt=1 (default)
        audios = audioldm_pipe(prompt, num_inference_steps=2).audios

        assert audios.shape == (1, 256)

        # test num_waveforms_per_prompt=1 (default) for batch of prompts
        batch_size = 2
        audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2).audios

        assert audios.shape == (batch_size, 256)

        # test num_waveforms_per_prompt for single prompt
        num_waveforms_per_prompt = 2
        audios = audioldm_pipe(
            prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt
        ).audios

        assert audios.shape == (num_waveforms_per_prompt, 256)

        # test num_waveforms_per_prompt for batch of prompts
        batch_size = 2
        audios = audioldm_pipe(
            [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt
        ).audios

        assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)

    def test_audioldm_audio_length_in_s(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(audio_length_in_s=0.016, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.016

        output = audioldm_pipe(audio_length_in_s=0.032, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.032

    def test_audioldm_vocoder_model_in_dim(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = ["hey"]

        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        assert audio_shape == (1, 256)

        config = audioldm_pipe.vocoder.config
        config.model_in_dim *= 2
        audioldm_pipe.vocoder = SpeechT5HifiGan(config).to(torch_device)
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
        assert audio_shape == (1, 256)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(test_mean_pixel_difference=False)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)


@slow
class AudioLDMPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 2.5,
        }
        return inputs

    def test_audioldm(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 25
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[77230:77240]
        expected_slice = np.array(
            [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315]
        )
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 1e-2

    def test_audioldm_lms(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[27780:27790]
        expected_slice = np.array(
            [-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212]
        )
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 3e-2
# Generated by the protocol buffer compiler.  DO NOT EDIT!
# source: sentencepiece_model.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder


# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()


DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
B"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"
)

_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b"H\003"
    # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
a : Tuple = ["gpt2"]
a : Dict = "gpt2"
if is_tf_available():
class UpperCamelCase__ ( tf.Module ):
"""simple docstring"""
def __init__( self , snake_case ):
'''simple docstring'''
super().__init__()
UpperCAmelCase : Tuple = tokenizer
UpperCAmelCase : List[str] = AutoConfig.from_pretrained(snake_case )
UpperCAmelCase : int = TFGPTaLMHeadModel.from_config(snake_case )
@tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name="text" ),) )
def A_ ( self , snake_case ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = self.tokenizer(snake_case )
UpperCAmelCase : Optional[int] = tokenized["input_ids"].to_tensor()
UpperCAmelCase : Optional[int] = tf.cast(input_ids_dense > 0 , tf.intaa )
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
UpperCAmelCase : List[Any] = self.model(input_ids=snake_case , attention_mask=snake_case )["logits"]
return outputs
@require_tf
@require_keras_nlp
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self ):
'''simple docstring'''
super().setUp()
UpperCAmelCase : Any = [GPTaTokenizer.from_pretrained(snake_case ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
UpperCAmelCase : Optional[Any] = [TFGPTaTokenizer.from_pretrained(snake_case ) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
UpperCAmelCase : Tuple = [
"This is a straightforward English test sentence.",
"This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
"Now we're going to add some Chinese: 一 二 三 一二三",
"And some much more rare Chinese: 齉 堃 齉堃",
"Je vais aussi écrire en français pour tester les accents",
"Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
]
UpperCAmelCase : Optional[Any] = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def A_ ( self ):
'''simple docstring'''
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in self.test_sentences:
UpperCAmelCase : List[Any] = tokenizer([test_inputs] , return_tensors="tf" )
UpperCAmelCase : Any = tf_tokenizer([test_inputs] )
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
UpperCAmelCase : Dict = python_outputs[key].numpy()
UpperCAmelCase : List[str] = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
self.assertTrue(tf.reduce_all(tf.cast(snake_case , tf.intaa ) == tf_outputs_values ) )
@slow
def A_ ( self ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
UpperCAmelCase : Optional[Any] = tf.function(snake_case )
for test_inputs in self.test_sentences:
UpperCAmelCase : List[str] = tf.constant(snake_case )
UpperCAmelCase : Dict = compiled_tokenizer(snake_case )
UpperCAmelCase : Union[str, Any] = tf_tokenizer(snake_case )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def A_ ( self ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
UpperCAmelCase : int = ModelToSave(tokenizer=snake_case )
UpperCAmelCase : Tuple = tf.convert_to_tensor([self.test_sentences[0]] )
UpperCAmelCase : str = model.serving(snake_case ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
UpperCAmelCase : Optional[int] = Path(snake_case ) / "saved.model"
tf.saved_model.save(snake_case , snake_case , signatures={"serving_default": model.serving} )
UpperCAmelCase : int = tf.saved_model.load(snake_case )
UpperCAmelCase : str = loaded_model.signatures["serving_default"](snake_case )["output_0"]
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
def A_ ( self ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
UpperCAmelCase : Any = tf.convert_to_tensor([self.test_sentences[0]] )
UpperCAmelCase : Tuple = tf_tokenizer(snake_case ) # Build model with some sample inputs
UpperCAmelCase : Union[str, Any] = tf_tokenizer.get_config()
UpperCAmelCase : str = TFGPTaTokenizer.from_config(snake_case )
UpperCAmelCase : Tuple = model_from_config(snake_case )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
def A_ ( self ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
# assign a pad token id (an arbitrary value) so that padding in the calls below can run
UpperCAmelCase : List[str] = 1_2_3_1_2_3
for max_length in [3, 5, 1_0_2_4]:
UpperCAmelCase : Any = tf.convert_to_tensor([self.test_sentences[0]] )
UpperCAmelCase : Tuple = tf_tokenizer(snake_case , max_length=snake_case )
UpperCAmelCase : Union[str, Any] = out["input_ids"].numpy().shape[1]
assert out_length == max_length
| 311
|
'''simple docstring'''
import argparse
import copy
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : List[str] = {}
with open(__magic_name__ ) as f:
for line in f:
if line.split()[0] not in dict_of_neighbours:
UpperCAmelCase : List[Any] = []
_list.append([line.split()[1], line.split()[2]] )
UpperCAmelCase : Tuple = _list
else:
dict_of_neighbours[line.split()[0]].append(
[line.split()[1], line.split()[2]] )
if line.split()[1] not in dict_of_neighbours:
UpperCAmelCase : Any = []
_list.append([line.split()[0], line.split()[2]] )
UpperCAmelCase : int = _list
else:
dict_of_neighbours[line.split()[1]].append(
[line.split()[0], line.split()[2]] )
return dict_of_neighbours
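# Illustrative sketch (assumed input format: one edge per line, "node_a node_b distance").
# For a file containing the lines "a b 20", "a c 18" and "b c 10", the adjacency
# dict built above would look like:
#     {"a": [["b", "20"], ["c", "18"]],
#      "b": [["a", "20"], ["c", "10"]],
#      "c": [["a", "18"], ["b", "10"]]}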
def lowercase ( __magic_name__ , __magic_name__ ):
'''simple docstring'''
with open(__magic_name__ ) as f:
UpperCAmelCase : List[str] = f.read(1 )
UpperCAmelCase : List[Any] = start_node
UpperCAmelCase : Union[str, Any] = []
UpperCAmelCase : Any = start_node
UpperCAmelCase : Optional[Any] = 0
while visiting not in first_solution:
UpperCAmelCase : Optional[Any] = 1_0000
for k in dict_of_neighbours[visiting]:
if int(k[1] ) < int(__magic_name__ ) and k[0] not in first_solution:
UpperCAmelCase : Tuple = k[1]
UpperCAmelCase : Dict = k[0]
first_solution.append(__magic_name__ )
UpperCAmelCase : int = distance_of_first_solution + int(__magic_name__ )
UpperCAmelCase : str = best_node
first_solution.append(__magic_name__ )
UpperCAmelCase : int = 0
for k in dict_of_neighbours[first_solution[-2]]:
if k[0] == start_node:
break
position += 1
UpperCAmelCase : str = (
distance_of_first_solution
+ int(dict_of_neighbours[first_solution[-2]][position][1] )
- 1_0000
)
return first_solution, distance_of_first_solution
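# Minimal sketch (hypothetical data) of the greedy step performed above: from the
# current node, pick the cheapest not-yet-visited neighbour, using a large
# sentinel (10000) as the initial "best distance".
neighbours = [["b", "20"], ["c", "18"]]
visited = ["a"]
best_node, best = None, 10_000
for node, dist in neighbours:
    if int(dist) < best and node not in visited:
        best_node, best = node, int(dist)
assert (best_node, best) == ("c", 18)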
def lowercase ( __magic_name__ , __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = []
for n in solution[1:-1]:
UpperCAmelCase : Any = solution.index(__magic_name__ )
for kn in solution[1:-1]:
UpperCAmelCase : Dict = solution.index(__magic_name__ )
if n == kn:
continue
UpperCAmelCase : Tuple = copy.deepcopy(__magic_name__ )
UpperCAmelCase : Optional[int] = kn
UpperCAmelCase : List[str] = n
UpperCAmelCase : str = 0
for k in _tmp[:-1]:
UpperCAmelCase : List[Any] = _tmp[_tmp.index(__magic_name__ ) + 1]
for i in dict_of_neighbours[k]:
if i[0] == next_node:
UpperCAmelCase : List[Any] = distance + int(i[1] )
_tmp.append(__magic_name__ )
if _tmp not in neighborhood_of_solution:
neighborhood_of_solution.append(_tmp )
UpperCAmelCase : List[str] = len(neighborhood_of_solution[0] ) - 1
    neighborhood_of_solution.sort(key=lambda __magic_name__ : __magic_name__[index_of_last_item_in_the_list] )
return neighborhood_of_solution
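# Sketch of the 2-swap neighbourhood explored above (hypothetical route): every
# pair of interior cities is exchanged while the endpoints stay fixed.
route = ["a", "b", "c", "d", "a"]
swaps = []
for i in range(1, len(route) - 1):
    for j in range(1, len(route) - 1):
        if i == j:
            continue
        candidate = route[:]
        candidate[i], candidate[j] = candidate[j], candidate[i]
        if candidate not in swaps:
            swaps.append(candidate)
assert ["a", "c", "b", "d", "a"] in swaps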
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : List[Any] = 1
UpperCAmelCase : List[str] = first_solution
UpperCAmelCase : str = []
UpperCAmelCase : Union[str, Any] = distance_of_first_solution
UpperCAmelCase : Union[str, Any] = solution
while count <= iters:
UpperCAmelCase : int = find_neighborhood(__magic_name__ , __magic_name__ )
UpperCAmelCase : Any = 0
UpperCAmelCase : List[str] = neighborhood[index_of_best_solution]
UpperCAmelCase : Dict = len(__magic_name__ ) - 1
UpperCAmelCase : Dict = False
while not found:
UpperCAmelCase : List[Any] = 0
while i < len(__magic_name__ ):
if best_solution[i] != solution[i]:
UpperCAmelCase : int = best_solution[i]
UpperCAmelCase : Optional[int] = solution[i]
break
UpperCAmelCase : List[str] = i + 1
if [first_exchange_node, second_exchange_node] not in tabu_list and [
second_exchange_node,
first_exchange_node,
] not in tabu_list:
tabu_list.append([first_exchange_node, second_exchange_node] )
UpperCAmelCase : List[str] = True
UpperCAmelCase : List[Any] = best_solution[:-1]
UpperCAmelCase : str = neighborhood[index_of_best_solution][best_cost_index]
if cost < best_cost:
UpperCAmelCase : Union[str, Any] = cost
UpperCAmelCase : Tuple = solution
else:
UpperCAmelCase : Optional[Any] = index_of_best_solution + 1
UpperCAmelCase : str = neighborhood[index_of_best_solution]
if len(__magic_name__ ) >= size:
tabu_list.pop(0 )
UpperCAmelCase : int = count + 1
return best_solution_ever, best_cost
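# Sketch of the tabu-list bookkeeping used above: recently swapped pairs are
# barred (in either order) until the fixed-size list evicts them.
tabu_list, size = [], 2
for move in (["b", "c"], ["c", "d"], ["b", "d"]):
    if move not in tabu_list and move[::-1] not in tabu_list:
        tabu_list.append(move)
    if len(tabu_list) >= size:
        tabu_list.pop(0)
assert ["b", "c"] not in tabu_list  # evicted once the list filled up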
def lowercase ( __magic_name__=None ):
'''simple docstring'''
UpperCAmelCase : Dict = generate_neighbours(args.File )
UpperCAmelCase , UpperCAmelCase : Any = generate_first_solution(
args.File , __magic_name__ )
UpperCAmelCase , UpperCAmelCase : Any = tabu_search(
__magic_name__ , __magic_name__ , __magic_name__ , args.Iterations , args.Size , )
print(F"Best solution: {best_sol}, with total distance: {best_cost}." )
if __name__ == "__main__":
a : Union[str, Any] = argparse.ArgumentParser(description="Tabu Search")
parser.add_argument(
"-f",
"--File",
type=str,
help="Path to the file containing the data",
required=True,
)
parser.add_argument(
"-i",
"--Iterations",
type=int,
help="How many iterations the algorithm should perform",
required=True,
)
parser.add_argument(
"-s", "--Size", type=int, help="Size of the tabu list", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
| 311
| 1
|
'''simple docstring'''
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
a : Optional[Any] = "src/transformers"
a : int = "docs/source/en/tasks"
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ ):
'''simple docstring'''
with open(__magic_name__ , "r" , encoding="utf-8" , newline="\n" ) as f:
UpperCAmelCase : Any = f.readlines()
# Find the start prompt.
UpperCAmelCase : Any = 0
while not lines[start_index].startswith(__magic_name__ ):
start_index += 1
start_index += 1
UpperCAmelCase : Dict = start_index
while not lines[end_index].startswith(__magic_name__ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
a : Any = direct_transformers_import(TRANSFORMERS_PATH)
a : Tuple = {
"asr.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
"audio_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
"language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
"image_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
"masked_language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
"multiple_choice.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
"object_detection.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
"question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
"semantic_segmentation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
"sequence_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
"summarization.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"token_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
"translation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"video_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
"document_question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
"monocular_depth_estimation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
a : Tuple = {
"summarization.md": ("nllb",),
"translation.md": ("nllb",),
}
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : List[Any] = TASK_GUIDE_TO_MODELS[task_guide]
UpperCAmelCase : int = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(__magic_name__ , set() )
UpperCAmelCase : Optional[Any] = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([F"[{name}](../model_doc/{code})" for code, name in model_names.items()] ) + "\n"
def lowercase ( __magic_name__ , __magic_name__=False ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Dict = _find_text_in_file(
filename=os.path.join(__magic_name__ , __magic_name__ ) , start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->" , end_prompt="<!--End of the generated tip-->" , )
UpperCAmelCase : Tuple = get_model_list_for_task(__magic_name__ )
if current_list != new_list:
if overwrite:
with open(os.path.join(__magic_name__ , __magic_name__ ) , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
F"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
" to fix this." )
if __name__ == "__main__":
a : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
a : List[Any] = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 311
|
'''simple docstring'''
from collections.abc import Generator
from math import sin
def lowercase ( __magic_name__ ):
'''simple docstring'''
if len(__magic_name__ ) != 32:
raise ValueError("Input must be of length 32" )
UpperCAmelCase : Union[str, Any] = b""
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
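# Illustration of the byte-order swap above: the 32-character word is read in
# four 8-character groups and emitted last group first.
word = b"AAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD"
swapped = b"".join(word[8 * i : 8 * i + 8] for i in (3, 2, 1, 0))
assert swapped == b"DDDDDDDDCCCCCCCCBBBBBBBBAAAAAAAA"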
def lowercase ( __magic_name__ ):
'''simple docstring'''
if i < 0:
raise ValueError("Input must be non-negative" )
UpperCAmelCase : Dict = format(__magic_name__ , "08x" )[-8:]
UpperCAmelCase : List[str] = b""
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8" )
return little_endian_hex
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : int = b""
for char in message:
bit_string += format(__magic_name__ , "08b" ).encode("utf-8" )
UpperCAmelCase : List[Any] = format(len(__magic_name__ ) , "064b" ).encode("utf-8" )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(__magic_name__ ) % 512 != 448:
bit_string += b"0"
bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
return bit_string
def lowercase ( __magic_name__ ):
'''simple docstring'''
if len(__magic_name__ ) % 512 != 0:
raise ValueError("Input must have length that's a multiple of 512" )
for pos in range(0 , len(__magic_name__ ) , 512 ):
UpperCAmelCase : Union[str, Any] = bit_string[pos : pos + 512]
UpperCAmelCase : Tuple = []
for i in range(0 , 512 , 32 ):
block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
yield block_words
def lowercase ( __magic_name__ ):
'''simple docstring'''
if i < 0:
raise ValueError("Input must be non-negative" )
UpperCAmelCase : Any = format(__magic_name__ , "032b" )
UpperCAmelCase : int = ""
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(__magic_name__ , 2 )
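# Equivalence check for the bit flip above: inverting every character of the
# 32-bit binary string matches a masked bitwise NOT.
i = 0x12345678
flipped = int("".join("1" if c == "0" else "0" for c in format(i, "032b")), 2)
assert flipped == (~i) & 0xFFFFFFFF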
def lowercase ( __magic_name__ , __magic_name__ ):
'''simple docstring'''
return (a + b) % 2**32
def lowercase ( __magic_name__ , __magic_name__ ):
'''simple docstring'''
if i < 0:
raise ValueError("Input must be non-negative" )
if shift < 0:
raise ValueError("Shift must be non-negative" )
return ((i << shift) ^ (i >> (32 - shift))) % 2**32
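# Note on the rotate above: the two shifted halves occupy disjoint bit ranges,
# so "^" behaves exactly like the usual "|" rotate-left idiom.
i, shift = 0x12345678, 5
assert ((i << shift) ^ (i >> (32 - shift))) % 2**32 == ((i << shift) | (i >> (32 - shift))) & 0xFFFFFFFF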
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Dict = preprocess(__magic_name__ )
UpperCAmelCase : List[Any] = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
# Starting states
UpperCAmelCase : List[str] = 0X67452301
UpperCAmelCase : Tuple = 0XEFCDAB89
UpperCAmelCase : List[Any] = 0X98BADCFE
UpperCAmelCase : List[str] = 0X10325476
UpperCAmelCase : Dict = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(__magic_name__ ):
UpperCAmelCase : Optional[Any] = aa
UpperCAmelCase : List[Any] = ba
UpperCAmelCase : Optional[Any] = ca
UpperCAmelCase : Any = da
# Hash current chunk
for i in range(64 ):
if i <= 15:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
UpperCAmelCase : Tuple = d ^ (b & (c ^ d))
UpperCAmelCase : List[str] = i
elif i <= 31:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
UpperCAmelCase : int = c ^ (d & (b ^ c))
UpperCAmelCase : Tuple = (5 * i + 1) % 16
elif i <= 47:
UpperCAmelCase : Any = b ^ c ^ d
UpperCAmelCase : Union[str, Any] = (3 * i + 5) % 16
else:
UpperCAmelCase : Dict = c ^ (b | not_aa(__magic_name__ ))
UpperCAmelCase : Dict = (7 * i) % 16
UpperCAmelCase : List[str] = (f + a + added_consts[i] + block_words[g]) % 2**32
UpperCAmelCase : List[Any] = d
UpperCAmelCase : Any = c
UpperCAmelCase : Dict = b
UpperCAmelCase : Union[str, Any] = sum_aa(__magic_name__ , left_rotate_aa(__magic_name__ , shift_amounts[i] ) )
# Add hashed chunk to running total
UpperCAmelCase : List[str] = sum_aa(__magic_name__ , __magic_name__ )
UpperCAmelCase : Any = sum_aa(__magic_name__ , __magic_name__ )
UpperCAmelCase : List[Any] = sum_aa(__magic_name__ , __magic_name__ )
UpperCAmelCase : Optional[int] = sum_aa(__magic_name__ , __magic_name__ )
UpperCAmelCase : List[str] = reformat_hex(__magic_name__ ) + reformat_hex(__magic_name__ ) + reformat_hex(__magic_name__ ) + reformat_hex(__magic_name__ )
return digest
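# Sanity check against the standard library (the well-known RFC 1321 test
# vector): an MD5 implementation like the one above should reproduce hashlib's
# digest for the same input.
import hashlib
assert hashlib.md5(b"abc").hexdigest() == "900150983cd24fb0d6963f7d28e17f72"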
if __name__ == "__main__":
import doctest
doctest.testmod()
| 311
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
a : Optional[Any] = {
"configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Tuple = [
"LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
"LongT5EncoderModel",
"LongT5ForConditionalGeneration",
"LongT5Model",
"LongT5PreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Dict = [
"FlaxLongT5ForConditionalGeneration",
"FlaxLongT5Model",
"FlaxLongT5PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longta import (
LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
LongTaEncoderModel,
LongTaForConditionalGeneration,
LongTaModel,
LongTaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_longta import (
FlaxLongTaForConditionalGeneration,
FlaxLongTaModel,
FlaxLongTaPreTrainedModel,
)
else:
import sys
a : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 311
|
'''simple docstring'''
a : List[str] = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 311
| 1
|
'''simple docstring'''
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self , snake_case , snake_case=1_3 , snake_case=7 , snake_case=True , snake_case=True , snake_case=True , snake_case=True , snake_case=9_9 , snake_case=6_4 , snake_case=3_2 , snake_case=5 , snake_case=4 , snake_case=3_7 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=5_1_2 , snake_case=1_6 , snake_case=2 , snake_case=0.02 , snake_case=3 , snake_case=4 , snake_case=None , ):
'''simple docstring'''
UpperCAmelCase : List[Any] = parent
UpperCAmelCase : Any = batch_size
UpperCAmelCase : str = seq_length
UpperCAmelCase : str = is_training
UpperCAmelCase : List[Any] = use_input_mask
UpperCAmelCase : Union[str, Any] = use_token_type_ids
UpperCAmelCase : Dict = use_labels
UpperCAmelCase : Any = vocab_size
UpperCAmelCase : Optional[Any] = hidden_size
UpperCAmelCase : Optional[int] = embedding_size
UpperCAmelCase : str = num_hidden_layers
UpperCAmelCase : List[str] = num_attention_heads
UpperCAmelCase : Any = intermediate_size
UpperCAmelCase : Tuple = hidden_act
UpperCAmelCase : Optional[int] = hidden_dropout_prob
UpperCAmelCase : Union[str, Any] = attention_probs_dropout_prob
UpperCAmelCase : Optional[Any] = max_position_embeddings
UpperCAmelCase : Any = type_vocab_size
UpperCAmelCase : Optional[Any] = type_sequence_label_size
UpperCAmelCase : Dict = initializer_range
UpperCAmelCase : List[str] = num_labels
UpperCAmelCase : List[str] = num_choices
UpperCAmelCase : Dict = scope
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase : Dict = None
if self.use_input_mask:
UpperCAmelCase : Any = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase : str = None
if self.use_token_type_ids:
UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase : Any = None
UpperCAmelCase : Union[str, Any] = None
UpperCAmelCase : List[Any] = None
if self.use_labels:
UpperCAmelCase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase : Tuple = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase : Optional[int] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A_ ( self ):
'''simple docstring'''
return MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case , initializer_range=self.initializer_range , )
def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : List[Any] = MobileBertModel(config=snake_case )
model.to(snake_case )
model.eval()
UpperCAmelCase : List[Any] = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case )
UpperCAmelCase : str = model(snake_case , token_type_ids=snake_case )
UpperCAmelCase : Any = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : List[str] = MobileBertForMaskedLM(config=snake_case )
model.to(snake_case )
model.eval()
UpperCAmelCase : Optional[int] = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : Tuple = MobileBertForNextSentencePrediction(config=snake_case )
model.to(snake_case )
model.eval()
UpperCAmelCase : Any = model(
snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : Dict = MobileBertForPreTraining(config=snake_case )
model.to(snake_case )
model.eval()
UpperCAmelCase : Tuple = model(
snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case , next_sentence_label=snake_case , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : Tuple = MobileBertForQuestionAnswering(config=snake_case )
model.to(snake_case )
model.eval()
UpperCAmelCase : Dict = model(
snake_case , attention_mask=snake_case , token_type_ids=snake_case , start_positions=snake_case , end_positions=snake_case , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : Tuple = self.num_labels
UpperCAmelCase : Union[str, Any] = MobileBertForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
UpperCAmelCase : Tuple = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = self.num_labels
UpperCAmelCase : int = MobileBertForTokenClassification(config=snake_case )
model.to(snake_case )
model.eval()
UpperCAmelCase : int = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : str = self.num_choices
UpperCAmelCase : Dict = MobileBertForMultipleChoice(config=snake_case )
model.to(snake_case )
model.eval()
UpperCAmelCase : List[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase : Any = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase : int = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase : str = model(
snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : List[str] = self.prepare_config_and_inputs()
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Any = config_and_inputs
UpperCAmelCase : List[str] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase__ ( lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE__ : Dict = (
{
"feature-extraction": MobileBertModel,
"fill-mask": MobileBertForMaskedLM,
"question-answering": MobileBertForQuestionAnswering,
"text-classification": MobileBertForSequenceClassification,
"token-classification": MobileBertForTokenClassification,
"zero-shot": MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ : List[Any] = True
def A_ ( self , snake_case , snake_case , snake_case=False ):
'''simple docstring'''
UpperCAmelCase : str = super()._prepare_for_class(snake_case , snake_case , return_labels=snake_case )
if return_labels:
if model_class in get_values(snake_case ):
UpperCAmelCase : Tuple = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=snake_case )
UpperCAmelCase : int = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case )
return inputs_dict
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : List[Any] = MobileBertModelTester(self )
UpperCAmelCase : Union[str, Any] = ConfigTester(self , config_class=snake_case , hidden_size=3_7 )
def A_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*snake_case )
def lowercase ( __magic_name__ ):
'''simple docstring'''
return torch.tensor(
__magic_name__ , dtype=torch.long , device=__magic_name__ , )
a : List[Any] = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : List[str] = MobileBertModel.from_pretrained("google/mobilebert-uncased" ).to(snake_case )
UpperCAmelCase : Optional[Any] = _long_tensor([[1_0_1, 7_1_1_0, 1_0_0_5, 1_0_5_6, 2_0_2_3, 1_1_3_3_3, 1_7_4_1_3, 1_0_2_9, 1_0_2]] )
with torch.no_grad():
UpperCAmelCase : Union[str, Any] = model(snake_case )[0]
UpperCAmelCase : Tuple = torch.Size((1, 9, 5_1_2) )
self.assertEqual(output.shape , snake_case )
UpperCAmelCase : List[Any] = torch.tensor(
[
[
[-2.4_73_65_26e07, 8.2_69_16_56e04, 1.6_52_18_38e05],
[-5.7_54_17_04e-01, 3.9_05_60_22e00, 4.4_01_15_07e00],
[2.6_04_73_59e00, 1.5_67_76_52e00, -1.7_32_41_88e-01],
]
] , device=snake_case , )
        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # difference of ~1, so it is not a good idea to measure closeness using subtraction.
        # Here, we instead divide the expected result by the actual result in order to obtain ~1. We then check that the
        # ratio lies within bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
UpperCAmelCase : Optional[Any] = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
UpperCAmelCase : Optional[int] = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
self.assertTrue(lower_bound and upper_bound )
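# Numeric sketch of the relative check above: with outputs near 1e8 an absolute
# tolerance is meaningless, but the ratio stays within 1 +/- TOLERANCE.
expected, got = 2.4736526e7, 2.4736530e7
assert 1 - 1e-3 < expected / got < 1 + 1e-3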
| 311
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
a : Dict = logging.get_logger(__name__)
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : List[str] = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
UpperCAmelCase : Tuple = 192
UpperCAmelCase : str = 768
UpperCAmelCase : List[Any] = 12
UpperCAmelCase : List[Any] = 3
UpperCAmelCase : List[Any] = [800, 1333]
UpperCAmelCase : List[str] = False
elif yolos_name == "yolos_s_dWr":
UpperCAmelCase : Union[str, Any] = 330
UpperCAmelCase : Union[str, Any] = 14
UpperCAmelCase : Any = 6
UpperCAmelCase : int = 1320
elif "yolos_s" in yolos_name:
UpperCAmelCase : Union[str, Any] = 384
UpperCAmelCase : Dict = 1536
UpperCAmelCase : str = 12
UpperCAmelCase : List[str] = 6
elif "yolos_b" in yolos_name:
UpperCAmelCase : int = [800, 1344]
UpperCAmelCase : Optional[int] = 91
UpperCAmelCase : int = "huggingface/label-files"
UpperCAmelCase : Union[str, Any] = "coco-detection-id2label.json"
UpperCAmelCase : Optional[Any] = json.load(open(hf_hub_download(__magic_name__ , __magic_name__ , repo_type="dataset" ) , "r" ) )
UpperCAmelCase : str = {int(__magic_name__ ): v for k, v in idalabel.items()}
UpperCAmelCase : str = idalabel
UpperCAmelCase : Union[str, Any] = {v: k for k, v in idalabel.items()}
return config
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ = False ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCAmelCase : Tuple = state_dict.pop(F"blocks.{i}.attn.qkv.weight" )
UpperCAmelCase : List[Any] = state_dict.pop(F"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase : str = in_proj_weight[: config.hidden_size, :]
UpperCAmelCase : Optional[int] = in_proj_bias[: config.hidden_size]
UpperCAmelCase : Optional[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCAmelCase : int = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCAmelCase : str = in_proj_weight[-config.hidden_size :, :]
UpperCAmelCase : Tuple = in_proj_bias[-config.hidden_size :]
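# Shape sketch of the fused-projection split above (hypothetical hidden size 4):
# a (3*H, H) qkv weight is sliced into equal (H, H) query / key / value blocks.
import torch
H = 4
qkv = torch.arange(3 * H * H, dtype=torch.float32).reshape(3 * H, H)
q, k, v = qkv[:H, :], qkv[H : 2 * H, :], qkv[-H:, :]
assert q.shape == k.shape == v.shape == (H, H)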
def lowercase ( __magic_name__ ):
'''simple docstring'''
if "backbone" in name:
UpperCAmelCase : int = name.replace("backbone" , "vit" )
if "cls_token" in name:
UpperCAmelCase : Dict = name.replace("cls_token" , "embeddings.cls_token" )
if "det_token" in name:
UpperCAmelCase : int = name.replace("det_token" , "embeddings.detection_tokens" )
if "mid_pos_embed" in name:
UpperCAmelCase : Tuple = name.replace("mid_pos_embed" , "encoder.mid_position_embeddings" )
if "pos_embed" in name:
UpperCAmelCase : int = name.replace("pos_embed" , "embeddings.position_embeddings" )
if "patch_embed.proj" in name:
UpperCAmelCase : str = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "blocks" in name:
UpperCAmelCase : Tuple = name.replace("blocks" , "encoder.layer" )
if "attn.proj" in name:
UpperCAmelCase : Tuple = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
UpperCAmelCase : Any = name.replace("attn" , "attention.self" )
if "norm1" in name:
UpperCAmelCase : int = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
UpperCAmelCase : List[str] = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
UpperCAmelCase : List[str] = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
UpperCAmelCase : Dict = name.replace("mlp.fc2" , "output.dense" )
if "class_embed" in name:
UpperCAmelCase : Any = name.replace("class_embed" , "class_labels_classifier" )
if "bbox_embed" in name:
UpperCAmelCase : Optional[int] = name.replace("bbox_embed" , "bbox_predictor" )
if "vit.norm" in name:
UpperCAmelCase : Tuple = name.replace("vit.norm" , "vit.layernorm" )
return name
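# Example of the mapping above (illustrative): applying the rules in order,
# "backbone.cls_token" -> "vit.embeddings.cls_token".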
def lowercase ( __magic_name__ , __magic_name__ ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
UpperCAmelCase : Optional[int] = orig_state_dict.pop(__magic_name__ )
if "qkv" in key:
UpperCAmelCase : str = key.split("." )
UpperCAmelCase : List[Any] = int(key_split[2] )
UpperCAmelCase : int = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
UpperCAmelCase : Optional[int] = val[:dim, :]
UpperCAmelCase : Union[str, Any] = val[
dim : dim * 2, :
]
UpperCAmelCase : Any = val[-dim:, :]
else:
UpperCAmelCase : Tuple = val[:dim]
UpperCAmelCase : List[str] = val[dim : dim * 2]
UpperCAmelCase : Any = val[-dim:]
else:
UpperCAmelCase : Union[str, Any] = val
return orig_state_dict
def lowercase ( ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCAmelCase : Tuple = Image.open(requests.get(__magic_name__ , stream=__magic_name__ ).raw )
return im
@torch.no_grad()
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = False ):
'''simple docstring'''
UpperCAmelCase : Tuple = get_yolos_config(__magic_name__ )
# load original state_dict
UpperCAmelCase : int = torch.load(__magic_name__ , map_location="cpu" )["model"]
# load 🤗 model
UpperCAmelCase : int = YolosForObjectDetection(__magic_name__ )
model.eval()
UpperCAmelCase : Dict = convert_state_dict(__magic_name__ , __magic_name__ )
model.load_state_dict(__magic_name__ )
# Check outputs on an image, prepared by YolosImageProcessor
UpperCAmelCase : Dict = 800 if yolos_name != "yolos_ti" else 512
UpperCAmelCase : int = YolosImageProcessor(format="coco_detection" , size=__magic_name__ )
UpperCAmelCase : List[Any] = image_processor(images=prepare_img() , return_tensors="pt" )
UpperCAmelCase : List[str] = model(**__magic_name__ )
UpperCAmelCase , UpperCAmelCase : Optional[int] = outputs.logits, outputs.pred_boxes
UpperCAmelCase , UpperCAmelCase : Optional[Any] = None, None
if yolos_name == "yolos_ti":
UpperCAmelCase : str = torch.tensor(
[[-3_9.5_0_2_2, -1_1.9_8_2_0, -1_7.6_8_8_8], [-2_9.9_5_7_4, -9.9_7_6_9, -1_7.7_6_9_1], [-4_2.3_2_8_1, -2_0.7_2_0_0, -3_0.6_2_9_4]] )
UpperCAmelCase : Tuple = torch.tensor(
[[0.4_0_2_1, 0.0_8_3_6, 0.7_9_7_9], [0.0_1_8_4, 0.2_6_0_9, 0.0_3_6_4], [0.1_7_8_1, 0.2_0_0_4, 0.2_0_9_5]] )
elif yolos_name == "yolos_s_200_pre":
UpperCAmelCase : Union[str, Any] = torch.tensor(
[[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] )
UpperCAmelCase : List[str] = torch.tensor(
[[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] )
elif yolos_name == "yolos_s_300_pre":
UpperCAmelCase : List[str] = torch.tensor(
[[-3_6.2_2_2_0, -1_4.4_3_8_5, -2_3.5_4_5_7], [-3_5.6_9_7_0, -1_4.7_5_8_3, -2_1.3_9_3_5], [-3_1.5_9_3_9, -1_3.6_0_4_2, -1_6.8_0_4_9]] )
UpperCAmelCase : Dict = torch.tensor(
[[0.7_6_1_4, 0.2_3_1_6, 0.4_7_2_8], [0.7_1_6_8, 0.4_4_9_5, 0.3_8_5_5], [0.4_9_9_6, 0.1_4_6_6, 0.9_9_9_6]] )
elif yolos_name == "yolos_s_dWr":
UpperCAmelCase : Dict = torch.tensor(
[[-4_2.8_6_6_8, -2_4.1_0_4_9, -4_1.1_6_9_0], [-3_4.7_4_5_6, -1_4.1_2_7_4, -2_4.9_1_9_4], [-3_3.7_8_9_8, -1_2.1_9_4_6, -2_5.6_4_9_5]] )
UpperCAmelCase : List[Any] = torch.tensor(
[[0.5_5_8_7, 0.2_7_7_3, 0.0_6_0_5], [0.5_0_0_4, 0.3_0_1_4, 0.9_9_9_4], [0.4_9_9_9, 0.1_5_4_8, 0.9_9_9_4]] )
elif yolos_name == "yolos_base":
UpperCAmelCase : str = torch.tensor(
[[-4_0.6_0_6_4, -2_4.3_0_8_4, -3_2.6_4_4_7], [-5_5.1_9_9_0, -3_0.7_7_1_9, -3_5.5_8_7_7], [-5_1.4_3_1_1, -3_3.3_5_0_7, -3_5.6_4_6_2]] )
UpperCAmelCase : Union[str, Any] = torch.tensor(
[[0.5_5_5_5, 0.2_7_9_4, 0.0_6_5_5], [0.9_0_4_9, 0.2_6_6_4, 0.1_8_9_4], [0.9_1_8_3, 0.1_9_8_4, 0.1_6_3_5]] )
else:
raise ValueError(F"Unknown yolos_name: {yolos_name}" )
assert torch.allclose(logits[0, :3, :3] , __magic_name__ , atol=1e-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , __magic_name__ , atol=1e-4 )
Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ )
print(F"Saving model {yolos_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(__magic_name__ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(__magic_name__ )
if push_to_hub:
UpperCAmelCase : int = {
"yolos_ti": "yolos-tiny",
"yolos_s_200_pre": "yolos-small",
"yolos_s_300_pre": "yolos-small-300",
"yolos_s_dWr": "yolos-small-dwr",
"yolos_base": "yolos-base",
}
print("Pushing to the hub..." )
UpperCAmelCase : Tuple = model_mapping[yolos_name]
image_processor.push_to_hub(__magic_name__ , organization="hustvl" )
model.push_to_hub(__magic_name__ , organization="hustvl" )
if __name__ == "__main__":
a : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--yolos_name",
default="yolos_s_200_pre",
type=str,
help=(
"Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
" 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
),
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
a : str = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 311
| 1
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
a : Optional[int] = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Optional[Any] = [
"MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"MraForMaskedLM",
"MraForMultipleChoice",
"MraForQuestionAnswering",
"MraForSequenceClassification",
"MraForTokenClassification",
"MraLayer",
"MraModel",
"MraPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
a : str = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 311
|
'''simple docstring'''
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
a : Tuple = logging.getLogger(__name__)
def lowercase ( ):
'''simple docstring'''
UpperCAmelCase : Any = argparse.ArgumentParser(
description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)." )
parser.add_argument("--file_path" , type=__magic_name__ , default="data/dump.txt" , help="The path to the data." )
parser.add_argument("--tokenizer_type" , type=__magic_name__ , default="bert" , choices=["bert", "roberta", "gpt2"] )
parser.add_argument("--tokenizer_name" , type=__magic_name__ , default="bert-base-uncased" , help="The tokenizer to use." )
parser.add_argument("--dump_file" , type=__magic_name__ , default="data/dump" , help="The dump file prefix." )
UpperCAmelCase : List[Any] = parser.parse_args()
logger.info(F"Loading Tokenizer ({args.tokenizer_name})" )
if args.tokenizer_type == "bert":
UpperCAmelCase : Any = BertTokenizer.from_pretrained(args.tokenizer_name )
UpperCAmelCase : Optional[int] = tokenizer.special_tokens_map["cls_token"] # `[CLS]`
UpperCAmelCase : Any = tokenizer.special_tokens_map["sep_token"] # `[SEP]`
elif args.tokenizer_type == "roberta":
UpperCAmelCase : List[Any] = RobertaTokenizer.from_pretrained(args.tokenizer_name )
UpperCAmelCase : Tuple = tokenizer.special_tokens_map["cls_token"] # `<s>`
UpperCAmelCase : Optional[int] = tokenizer.special_tokens_map["sep_token"] # `</s>`
elif args.tokenizer_type == "gpt2":
UpperCAmelCase : List[str] = GPTaTokenizer.from_pretrained(args.tokenizer_name )
UpperCAmelCase : Optional[Any] = tokenizer.special_tokens_map["bos_token"] # `<|endoftext|>`
UpperCAmelCase : List[Any] = tokenizer.special_tokens_map["eos_token"] # `<|endoftext|>`
logger.info(F"Loading text from {args.file_path}" )
with open(args.file_path , "r" , encoding="utf8" ) as fp:
UpperCAmelCase : str = fp.readlines()
logger.info("Start encoding" )
logger.info(F"{len(__magic_name__ )} examples to process." )
UpperCAmelCase : int = []
UpperCAmelCase : int = 0
UpperCAmelCase : Union[str, Any] = 1_0000
UpperCAmelCase : Union[str, Any] = time.time()
for text in data:
UpperCAmelCase : Dict = F"{bos} {text.strip()} {sep}"
UpperCAmelCase : Tuple = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
rslt.append(__magic_name__ )
iter += 1
if iter % interval == 0:
UpperCAmelCase : Dict = time.time()
logger.info(F"{iter} examples processed. - {(end-start):.2f}s/{interval}expl" )
UpperCAmelCase : Any = time.time()
logger.info("Finished binarization" )
logger.info(F"{len(__magic_name__ )} examples processed." )
UpperCAmelCase : str = F"{args.dump_file}.{args.tokenizer_name}.pickle"
UpperCAmelCase : List[str] = tokenizer.vocab_size
if vocab_size < (1 << 16):
UpperCAmelCase : int = [np.uintaa(__magic_name__ ) for d in rslt]
else:
UpperCAmelCase : int = [np.intaa(__magic_name__ ) for d in rslt]
random.shuffle(rslt_ )
logger.info(F"Dump to {dp_file}" )
with open(__magic_name__ , "wb" ) as handle:
pickle.dump(rslt_ , __magic_name__ , protocol=pickle.HIGHEST_PROTOCOL )
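# Sizing note for the dtype choice above (sketch): token ids below 2**16 fit in
# an unsigned 16-bit integer, halving the pickle size versus 32-bit ids.
import numpy as np
assert np.dtype(np.uint16).itemsize == 2 and np.dtype(np.int32).itemsize == 4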
if __name__ == "__main__":
main()
| 311
| 1
|
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
a : List[str] = logging.get_logger(__name__)
a : Optional[Any] = ["model.decoder.embed_positions.weights"]
def lowercase ( __magic_name__ ):
'''simple docstring'''
if "emb" in name:
UpperCAmelCase : str = name.replace("emb" , "model.decoder.embed_tokens" )
if "transformer" in name:
UpperCAmelCase : List[str] = name.replace("transformer" , "model.decoder" )
if "cross_attention" in name:
UpperCAmelCase : int = name.replace("cross_attention" , "encoder_attn" )
if "linear1" in name:
UpperCAmelCase : List[Any] = name.replace("linear1" , "fc1" )
if "linear2" in name:
UpperCAmelCase : int = name.replace("linear2" , "fc2" )
if "norm1" in name:
UpperCAmelCase : Dict = name.replace("norm1" , "self_attn_layer_norm" )
if "norm_cross" in name:
UpperCAmelCase : Any = name.replace("norm_cross" , "encoder_attn_layer_norm" )
if "norm2" in name:
UpperCAmelCase : Union[str, Any] = name.replace("norm2" , "final_layer_norm" )
if "out_norm" in name:
UpperCAmelCase : Dict = name.replace("out_norm" , "model.decoder.layer_norm" )
if "linears" in name:
UpperCAmelCase : List[Any] = name.replace("linears" , "lm_heads" )
if "condition_provider.conditioners.description.output_proj" in name:
UpperCAmelCase : Any = name.replace("condition_provider.conditioners.description.output_proj" , "enc_to_dec_proj" )
return name
def lowercase ( __magic_name__ , __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Any = list(state_dict.keys() )
UpperCAmelCase : List[Any] = {}
for key in keys:
UpperCAmelCase : Any = state_dict.pop(__magic_name__ )
UpperCAmelCase : str = rename_keys(__magic_name__ )
if "in_proj_weight" in key:
# split fused qkv proj
UpperCAmelCase : Optional[int] = val[:hidden_size, :]
UpperCAmelCase : Optional[Any] = val[hidden_size : 2 * hidden_size, :]
UpperCAmelCase : Optional[Any] = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
UpperCAmelCase : str = val
else:
UpperCAmelCase : int = val
return state_dict, enc_dec_proj_state_dict
def lowercase ( __magic_name__ ):
'''simple docstring'''
if checkpoint == "small":
# default config values
UpperCAmelCase : List[Any] = 1024
UpperCAmelCase : Tuple = 24
UpperCAmelCase : Union[str, Any] = 16
elif checkpoint == "medium":
UpperCAmelCase : List[Any] = 1536
UpperCAmelCase : Optional[Any] = 48
UpperCAmelCase : List[str] = 24
elif checkpoint == "large":
UpperCAmelCase : List[Any] = 2048
UpperCAmelCase : str = 48
UpperCAmelCase : Optional[Any] = 32
else:
raise ValueError(F"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}." )
UpperCAmelCase : Tuple = MusicgenDecoderConfig(
hidden_size=__magic_name__ , ffn_dim=hidden_size * 4 , num_hidden_layers=__magic_name__ , num_attention_heads=__magic_name__ , )
return config
@torch.no_grad()
def lowercase ( __magic_name__ , __magic_name__=None , __magic_name__=None , __magic_name__="cpu" ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = MusicGen.get_pretrained(__magic_name__ , device=__magic_name__ )
UpperCAmelCase : List[str] = decoder_config_from_checkpoint(__magic_name__ )
UpperCAmelCase : Dict = fairseq_model.lm.state_dict()
UpperCAmelCase , UpperCAmelCase : List[str] = rename_state_dict(
__magic_name__ , hidden_size=decoder_config.hidden_size )
UpperCAmelCase : Any = TaEncoderModel.from_pretrained("t5-base" )
UpperCAmelCase : Any = EncodecModel.from_pretrained("facebook/encodec_32khz" )
UpperCAmelCase : int = MusicgenForCausalLM(__magic_name__ ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
UpperCAmelCase , UpperCAmelCase : Optional[int] = decoder.load_state_dict(__magic_name__ , strict=__magic_name__ )
for key in missing_keys.copy():
if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(__magic_name__ )
if len(__magic_name__ ) > 0:
raise ValueError(F"Missing key(s) in state_dict: {missing_keys}" )
if len(__magic_name__ ) > 0:
raise ValueError(F"Unexpected key(s) in state_dict: {unexpected_keys}" )
# init the composite model
UpperCAmelCase : List[Any] = MusicgenForConditionalGeneration(text_encoder=__magic_name__ , audio_encoder=__magic_name__ , decoder=__magic_name__ )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(__magic_name__ )
# check we can do a forward pass
UpperCAmelCase : Union[str, Any] = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
UpperCAmelCase : Optional[Any] = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
UpperCAmelCase : str = model(input_ids=__magic_name__ , decoder_input_ids=__magic_name__ ).logits
if logits.shape != (8, 1, 2048):
raise ValueError("Incorrect shape for logits" )
# now construct the processor
UpperCAmelCase : Dict = AutoTokenizer.from_pretrained("t5-base" )
UpperCAmelCase : List[str] = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" , padding_side="left" )
UpperCAmelCase : Dict = MusicgenProcessor(feature_extractor=__magic_name__ , tokenizer=__magic_name__ )
# set the appropriate bos/pad token ids
UpperCAmelCase : List[Any] = 2048
UpperCAmelCase : Tuple = 2048
# set other default generation config params
UpperCAmelCase : Tuple = int(30 * audio_encoder.config.frame_rate )
UpperCAmelCase : str = True
UpperCAmelCase : Tuple = 3.0
if pytorch_dump_folder is not None:
Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ )
logger.info(F"Saving model {checkpoint} to {pytorch_dump_folder}" )
model.save_pretrained(__magic_name__ )
processor.save_pretrained(__magic_name__ )
if repo_id:
logger.info(F"Pushing model {checkpoint} to {repo_id}" )
model.push_to_hub(__magic_name__ )
processor.push_to_hub(__magic_name__ )
if __name__ == "__main__":
a : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
a : int = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 311
|
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
a : Tuple = ["gpt2"]
a : Dict = "gpt2"
if is_tf_available():
class UpperCamelCase__ ( tf.Module ):
"""simple docstring"""
def __init__( self , snake_case ):
'''simple docstring'''
super().__init__()
UpperCAmelCase : Tuple = tokenizer
UpperCAmelCase : List[str] = AutoConfig.from_pretrained(snake_case )
UpperCAmelCase : int = TFGPTaLMHeadModel.from_config(snake_case )
@tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name="text" ),) )
def A_ ( self , snake_case ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = self.tokenizer(snake_case )
UpperCAmelCase : Optional[int] = tokenized["input_ids"].to_tensor()
UpperCAmelCase : Optional[int] = tf.cast(input_ids_dense > 0 , tf.intaa )
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
UpperCAmelCase : List[Any] = self.model(input_ids=snake_case , attention_mask=snake_case )["logits"]
return outputs
@require_tf
@require_keras_nlp
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self ):
'''simple docstring'''
super().setUp()
UpperCAmelCase : Any = [GPTaTokenizer.from_pretrained(snake_case ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
UpperCAmelCase : Optional[Any] = [TFGPTaTokenizer.from_pretrained(snake_case ) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
UpperCAmelCase : Tuple = [
"This is a straightforward English test sentence.",
"This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
"Now we're going to add some Chinese: 一 二 三 一二三",
"And some much more rare Chinese: 齉 堃 齉堃",
"Je vais aussi écrire en français pour tester les accents",
"Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
]
UpperCAmelCase : Optional[Any] = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def A_ ( self ):
'''simple docstring'''
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in self.test_sentences:
UpperCAmelCase : List[Any] = tokenizer([test_inputs] , return_tensors="tf" )
UpperCAmelCase : Any = tf_tokenizer([test_inputs] )
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
UpperCAmelCase : Dict = python_outputs[key].numpy()
UpperCAmelCase : List[str] = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
self.assertTrue(tf.reduce_all(tf.cast(snake_case , tf.intaa ) == tf_outputs_values ) )
@slow
def A_ ( self ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
UpperCAmelCase : Optional[Any] = tf.function(snake_case )
for test_inputs in self.test_sentences:
UpperCAmelCase : List[str] = tf.constant(snake_case )
UpperCAmelCase : Dict = compiled_tokenizer(snake_case )
UpperCAmelCase : Union[str, Any] = tf_tokenizer(snake_case )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def A_ ( self ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
UpperCAmelCase : int = ModelToSave(tokenizer=snake_case )
UpperCAmelCase : Tuple = tf.convert_to_tensor([self.test_sentences[0]] )
UpperCAmelCase : str = model.serving(snake_case ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
UpperCAmelCase : Optional[int] = Path(snake_case ) / "saved.model"
tf.saved_model.save(snake_case , snake_case , signatures={"serving_default": model.serving} )
UpperCAmelCase : int = tf.saved_model.load(snake_case )
UpperCAmelCase : str = loaded_model.signatures["serving_default"](snake_case )["output_0"]
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
def A_ ( self ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
UpperCAmelCase : Any = tf.convert_to_tensor([self.test_sentences[0]] )
UpperCAmelCase : Tuple = tf_tokenizer(snake_case ) # Build model with some sample inputs
UpperCAmelCase : Union[str, Any] = tf_tokenizer.get_config()
UpperCAmelCase : str = TFGPTaTokenizer.from_config(snake_case )
UpperCAmelCase : Tuple = model_from_config(snake_case )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
def A_ ( self ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
UpperCAmelCase : List[str] = 1_2_3_1_2_3
for max_length in [3, 5, 1_0_2_4]:
UpperCAmelCase : Any = tf.convert_to_tensor([self.test_sentences[0]] )
UpperCAmelCase : Tuple = tf_tokenizer(snake_case , max_length=snake_case )
UpperCAmelCase : Union[str, Any] = out["input_ids"].numpy().shape[1]
assert out_length == max_length
| 311
| 1
|
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
a : int = logging.get_logger(__name__)
a : Union[str, Any] = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
a : Dict = {
"vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
"merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
"tokenizer_config_file": {
"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
},
}
a : List[Any] = {"facebook/blenderbot-3B": 1_28}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def lowercase ( ):
'''simple docstring'''
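    # Byte-level BPE needs a reversible byte -> unicode mapping: printable bytes
    # map to themselves, and the remaining bytes are shifted past 2**8 so that
    # every byte is represented by a visible character.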
UpperCAmelCase : Any = (
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
UpperCAmelCase : Dict = bs[:]
UpperCAmelCase : List[str] = 0
for b in range(2**8 ):
if b not in bs:
bs.append(__magic_name__ )
cs.append(2**8 + n )
n += 1
UpperCAmelCase : str = [chr(__magic_name__ ) for n in cs]
return dict(zip(__magic_name__ , __magic_name__ ) )
def lowercase ( __magic_name__ ):
'''simple docstring'''
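    # Collects the set of adjacent symbol pairs in a word, e.g.
    # ("h", "e", "l", "l", "o") -> {("h","e"), ("e","l"), ("l","l"), ("l","o")}.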
UpperCAmelCase : Union[str, Any] = set()
UpperCAmelCase : Union[str, Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
UpperCAmelCase : str = char
return pairs
class UpperCamelCase__ ( lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ : List[str] = ["input_ids", "attention_mask"]
def __init__( self , snake_case , snake_case , snake_case="replace" , snake_case="<s>" , snake_case="</s>" , snake_case="</s>" , snake_case="<s>" , snake_case="<unk>" , snake_case="<pad>" , snake_case="<mask>" , snake_case=False , **snake_case , ):
'''simple docstring'''
UpperCAmelCase : int = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else bos_token
UpperCAmelCase : Optional[int] = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else eos_token
UpperCAmelCase : int = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else sep_token
UpperCAmelCase : int = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else cls_token
UpperCAmelCase : Any = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else unk_token
UpperCAmelCase : Union[str, Any] = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase : Tuple = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else mask_token
super().__init__(
errors=snake_case , bos_token=snake_case , eos_token=snake_case , unk_token=snake_case , sep_token=snake_case , cls_token=snake_case , pad_token=snake_case , mask_token=snake_case , add_prefix_space=snake_case , **snake_case , )
with open(snake_case , encoding="utf-8" ) as vocab_handle:
UpperCAmelCase : Optional[Any] = json.load(snake_case )
UpperCAmelCase : str = {v: k for k, v in self.encoder.items()}
UpperCAmelCase : Dict = errors # how to handle errors in decoding
UpperCAmelCase : Optional[Any] = bytes_to_unicode()
UpperCAmelCase : Tuple = {v: k for k, v in self.byte_encoder.items()}
with open(snake_case , encoding="utf-8" ) as merges_handle:
UpperCAmelCase : str = merges_handle.read().split("\n" )[1:-1]
UpperCAmelCase : int = [tuple(merge.split() ) for merge in bpe_merges]
UpperCAmelCase : Any = dict(zip(snake_case , range(len(snake_case ) ) ) )
UpperCAmelCase : Optional[int] = {}
UpperCAmelCase : List[Any] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
UpperCAmelCase : str = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def A_ ( self ):
'''simple docstring'''
return len(self.encoder )
def A_ ( self ):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def A_ ( self , snake_case ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
UpperCAmelCase : List[str] = tuple(snake_case )
UpperCAmelCase : List[Any] = get_pairs(snake_case )
if not pairs:
return token
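        # Greedy BPE: repeatedly merge the adjacent symbol pair with the lowest
        # merge rank until no ranked pair remains.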
while True:
UpperCAmelCase : Union[str, Any] = min(snake_case , key=lambda snake_case : self.bpe_ranks.get(snake_case , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
UpperCAmelCase , UpperCAmelCase : List[Any] = bigram
UpperCAmelCase : Union[str, Any] = []
UpperCAmelCase : List[Any] = 0
while i < len(snake_case ):
try:
UpperCAmelCase : Union[str, Any] = word.index(snake_case , snake_case )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
UpperCAmelCase : Optional[int] = j
if word[i] == first and i < len(snake_case ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
UpperCAmelCase : List[Any] = tuple(snake_case )
UpperCAmelCase : Dict = new_word
if len(snake_case ) == 1:
break
else:
UpperCAmelCase : List[str] = get_pairs(snake_case )
UpperCAmelCase : Dict = " ".join(snake_case )
UpperCAmelCase : List[Any] = word
return word
def A_ ( self , snake_case ):
'''simple docstring'''
UpperCAmelCase : Any = []
for token in re.findall(self.pat , snake_case ):
UpperCAmelCase : List[str] = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(snake_case ).split(" " ) )
return bpe_tokens
def A_ ( self , snake_case ):
'''simple docstring'''
return self.encoder.get(snake_case , self.encoder.get(self.unk_token ) )
def A_ ( self , snake_case ):
'''simple docstring'''
return self.decoder.get(snake_case )
def A_ ( self , snake_case ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = "".join(snake_case )
UpperCAmelCase : Optional[int] = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def A_ ( self , snake_case , snake_case = None ):
'''simple docstring'''
if not os.path.isdir(snake_case ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
UpperCAmelCase : List[Any] = os.path.join(
snake_case , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
UpperCAmelCase : str = os.path.join(
snake_case , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(snake_case , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=snake_case , ensure_ascii=snake_case ) + "\n" )
UpperCAmelCase : List[str] = 0
with open(snake_case , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda snake_case : snake_case[1] ):
if index != token_index:
logger.warning(
f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!" )
UpperCAmelCase : Optional[int] = token_index
writer.write(" ".join(snake_case ) + "\n" )
index += 1
return vocab_file, merge_file
def A_ ( self , snake_case , snake_case = None , snake_case = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case , token_ids_a=snake_case , already_has_special_tokens=snake_case )
if token_ids_a is None:
return [1] + ([0] * len(snake_case )) + [1]
return [1] + ([0] * len(snake_case )) + [1, 1] + ([0] * len(snake_case )) + [1]
def A_ ( self , snake_case , snake_case = None ):
'''simple docstring'''
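        # Blenderbot does not use token type ids, so every position (including
        # the special tokens) gets segment id 0.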
UpperCAmelCase : Dict = [self.sep_token_id]
UpperCAmelCase : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def A_ ( self , snake_case , snake_case=False , **snake_case ):
'''simple docstring'''
UpperCAmelCase : Tuple = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(snake_case ) > 0 and not text[0].isspace()):
UpperCAmelCase : List[Any] = " " + text
return (text, kwargs)
def A_ ( self , snake_case , snake_case = None ):
'''simple docstring'''
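        # Blenderbot only appends the EOS token to the first sequence; it adds
        # no BOS and ignores the (optional) second sequence.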
return token_ids_a + [self.eos_token_id]
def A_ ( self , snake_case ):
'''simple docstring'''
UpperCAmelCase : List[Any] = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(" " + text )
else:
# Generated responses should contain them already.
inputs.append(snake_case )
UpperCAmelCase : Optional[int] = " ".join(snake_case )
UpperCAmelCase : List[str] = self.encode(snake_case )
if len(snake_case ) > self.model_max_length:
UpperCAmelCase : List[Any] = input_ids[-self.model_max_length :]
logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens." )
return input_ids
| 311
|
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
a : str = "docs/source/en/_toctree.yml"
def lowercase ( __magic_name__ ):
'''simple docstring'''
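    # Deduplicate entries that share the same "local" key: they must all carry
    # the same title, and only one copy is kept in the cleaned, sorted TOC.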
UpperCAmelCase : Dict = defaultdict(__magic_name__ )
for doc in model_doc:
counts[doc["local"]] += 1
UpperCAmelCase : List[Any] = [key for key, value in counts.items() if value > 1]
UpperCAmelCase : Dict = []
for duplicate_key in duplicates:
UpperCAmelCase : Union[str, Any] = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key} )
if len(__magic_name__ ) > 1:
raise ValueError(
F"{duplicate_key} is present several times in the documentation table of content at "
"`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
"others." )
# Only add this once
new_doc.append({"local": duplicate_key, "title": titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1] )
# Sort
    return sorted(__magic_name__ , key=lambda __magic_name__ : __magic_name__["title"].lower() )
def lowercase ( __magic_name__=False ):
'''simple docstring'''
with open(__magic_name__ , encoding="utf-8" ) as f:
UpperCAmelCase : Any = yaml.safe_load(f.read() )
# Get to the API doc
UpperCAmelCase : Optional[int] = 0
while content[api_idx]["title"] != "API":
api_idx += 1
UpperCAmelCase : Union[str, Any] = content[api_idx]["sections"]
# Then to the model doc
UpperCAmelCase : Any = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
UpperCAmelCase : str = api_doc[model_idx]["sections"]
UpperCAmelCase : Any = [(idx, section) for idx, section in enumerate(__magic_name__ ) if "sections" in section]
UpperCAmelCase : Optional[int] = False
for idx, modality_doc in modalities_docs:
UpperCAmelCase : int = modality_doc["sections"]
UpperCAmelCase : int = clean_model_doc_toc(__magic_name__ )
if old_modality_doc != new_modality_doc:
UpperCAmelCase : int = True
if overwrite:
UpperCAmelCase : Dict = new_modality_doc
if diff:
if overwrite:
UpperCAmelCase : Any = model_doc
UpperCAmelCase : Any = api_doc
with open(__magic_name__ , "w" , encoding="utf-8" ) as f:
f.write(yaml.dump(__magic_name__ , allow_unicode=__magic_name__ ) )
else:
raise ValueError(
"The model doc part of the table of content is not properly sorted, run `make style` to fix this." )
if __name__ == "__main__":
a : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
a : Optional[Any] = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
| 311
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
a : Optional[int] = logging.get_logger(__name__)
a : Any = {
"Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json",
# See all Marian models at https://huggingface.co/models?filter=marian
}
class UpperCamelCase__ ( lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = "marian"
SCREAMING_SNAKE_CASE__ : Optional[Any] = ["past_key_values"]
SCREAMING_SNAKE_CASE__ : Dict = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self , snake_case=5_8_1_0_1 , snake_case=None , snake_case=1_0_2_4 , snake_case=1_2 , snake_case=4_0_9_6 , snake_case=1_6 , snake_case=1_2 , snake_case=4_0_9_6 , snake_case=1_6 , snake_case=0.0 , snake_case=0.0 , snake_case=True , snake_case=True , snake_case="gelu" , snake_case=1_0_2_4 , snake_case=0.1 , snake_case=0.0 , snake_case=0.0 , snake_case=0.02 , snake_case=5_8_1_0_0 , snake_case=False , snake_case=5_8_1_0_0 , snake_case=0 , snake_case=0 , snake_case=True , **snake_case , ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = vocab_size
UpperCAmelCase : Dict = decoder_vocab_size or vocab_size
UpperCAmelCase : int = max_position_embeddings
UpperCAmelCase : Any = d_model
UpperCAmelCase : Optional[Any] = encoder_ffn_dim
UpperCAmelCase : str = encoder_layers
UpperCAmelCase : Any = encoder_attention_heads
UpperCAmelCase : Optional[Any] = decoder_ffn_dim
UpperCAmelCase : Union[str, Any] = decoder_layers
UpperCAmelCase : str = decoder_attention_heads
UpperCAmelCase : int = dropout
UpperCAmelCase : Any = attention_dropout
UpperCAmelCase : Optional[int] = activation_dropout
UpperCAmelCase : Optional[Any] = activation_function
UpperCAmelCase : Union[str, Any] = init_std
UpperCAmelCase : List[Any] = encoder_layerdrop
UpperCAmelCase : Tuple = decoder_layerdrop
UpperCAmelCase : Union[str, Any] = use_cache
UpperCAmelCase : List[Any] = encoder_layers
UpperCAmelCase : str = scale_embedding # scale factor will be sqrt(d_model) if True
UpperCAmelCase : List[str] = share_encoder_decoder_embeddings
super().__init__(
pad_token_id=snake_case , eos_token_id=snake_case , is_encoder_decoder=snake_case , decoder_start_token_id=snake_case , forced_eos_token_id=snake_case , **snake_case , )
class UpperCamelCase__ ( lowercase__ ):
"""simple docstring"""
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def A_ ( self ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase : Optional[Any] = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
UpperCAmelCase : Union[str, Any] = {0: "batch"}
UpperCAmelCase : int = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
UpperCAmelCase : List[Any] = {0: "batch", 1: "decoder_sequence"}
UpperCAmelCase : Union[str, Any] = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(snake_case , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
UpperCAmelCase : str = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
UpperCAmelCase , UpperCAmelCase : int = self.num_layers
for i in range(snake_case ):
UpperCAmelCase : List[str] = {0: "batch", 2: "past_sequence + sequence"}
UpperCAmelCase : Union[str, Any] = {0: "batch", 2: "past_sequence + sequence"}
else:
UpperCAmelCase : Dict = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def A_ ( self ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase : str = super().outputs
else:
UpperCAmelCase : Union[str, Any] = super(snake_case , self ).outputs
if self.use_past:
UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.num_layers
for i in range(snake_case ):
UpperCAmelCase : List[str] = {0: "batch", 2: "past_sequence + sequence"}
UpperCAmelCase : Optional[Any] = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def A_ ( self , snake_case , snake_case = -1 , snake_case = -1 , snake_case = False , snake_case = None , ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = self._generate_dummy_inputs_for_encoder_and_decoder(
snake_case , snake_case , snake_case , snake_case , snake_case )
# Generate decoder inputs
UpperCAmelCase : Optional[Any] = seq_length if not self.use_past else 1
UpperCAmelCase : Union[str, Any] = self._generate_dummy_inputs_for_encoder_and_decoder(
snake_case , snake_case , snake_case , snake_case , snake_case )
UpperCAmelCase : Any = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
UpperCAmelCase : str = dict(**snake_case , **snake_case )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
UpperCAmelCase , UpperCAmelCase : Optional[int] = common_inputs["input_ids"].shape
UpperCAmelCase : Any = common_inputs["decoder_input_ids"].shape[1]
UpperCAmelCase , UpperCAmelCase : List[str] = self.num_attention_heads
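                # Each past key/value tensor is (batch, num_heads, seq_len,
                # hidden_size // num_heads); encoder and decoder shapes are built below.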
UpperCAmelCase : int = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
UpperCAmelCase : Union[str, Any] = decoder_seq_length + 3
UpperCAmelCase : List[Any] = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
UpperCAmelCase : List[str] = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(snake_case , snake_case )] , dim=1 )
UpperCAmelCase : str = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.num_layers
UpperCAmelCase : List[Any] = min(snake_case , snake_case )
UpperCAmelCase : Optional[Any] = max(snake_case , snake_case ) - min_num_layers
UpperCAmelCase : Optional[Any] = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(snake_case ):
common_inputs["past_key_values"].append(
(
torch.zeros(snake_case ),
torch.zeros(snake_case ),
torch.zeros(snake_case ),
torch.zeros(snake_case ),
) )
# TODO: test this.
UpperCAmelCase : int = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(snake_case , snake_case ):
common_inputs["past_key_values"].append((torch.zeros(snake_case ), torch.zeros(snake_case )) )
return common_inputs
def A_ ( self , snake_case , snake_case = -1 , snake_case = -1 , snake_case = False , snake_case = None , ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = self._generate_dummy_inputs_for_encoder_and_decoder(
snake_case , snake_case , snake_case , snake_case , snake_case )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
UpperCAmelCase , UpperCAmelCase : int = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
UpperCAmelCase : Dict = seqlen + 2
UpperCAmelCase , UpperCAmelCase : List[str] = self.num_layers
UpperCAmelCase , UpperCAmelCase : int = self.num_attention_heads
UpperCAmelCase : Optional[int] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
UpperCAmelCase : Dict = common_inputs["attention_mask"].dtype
UpperCAmelCase : Union[str, Any] = torch.cat(
[common_inputs["attention_mask"], torch.ones(snake_case , snake_case , dtype=snake_case )] , dim=1 )
UpperCAmelCase : Union[str, Any] = [
(torch.zeros(snake_case ), torch.zeros(snake_case )) for _ in range(snake_case )
]
return common_inputs
def A_ ( self , snake_case , snake_case = -1 , snake_case = -1 , snake_case = False , snake_case = None , ):
'''simple docstring'''
UpperCAmelCase : List[str] = compute_effective_axis_dimension(
snake_case , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCAmelCase : List[Any] = tokenizer.num_special_tokens_to_add(snake_case )
UpperCAmelCase : Dict = compute_effective_axis_dimension(
snake_case , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=snake_case )
# Generate dummy inputs according to compute batch and sequence
UpperCAmelCase : Optional[Any] = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
UpperCAmelCase : Tuple = dict(tokenizer(snake_case , return_tensors=snake_case ) )
return common_inputs
def A_ ( self , snake_case , snake_case = -1 , snake_case = -1 , snake_case = False , snake_case = None , ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase : int = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
snake_case , batch_size=snake_case , seq_length=snake_case , is_pair=snake_case , framework=snake_case )
else:
UpperCAmelCase : Any = self._generate_dummy_inputs_for_causal_lm(
snake_case , batch_size=snake_case , seq_length=snake_case , is_pair=snake_case , framework=snake_case )
return common_inputs
def A_ ( self , snake_case , snake_case , snake_case , snake_case ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase : Optional[Any] = super()._flatten_past_key_values_(snake_case , snake_case , snake_case , snake_case )
else:
UpperCAmelCase : List[Any] = super(snake_case , self )._flatten_past_key_values_(
snake_case , snake_case , snake_case , snake_case )
@property
def A_ ( self ):
'''simple docstring'''
return 1e-4
| 311
|
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def lowercase ( __magic_name__ ):
'''simple docstring'''
for param in module.parameters():
UpperCAmelCase : Any = False
def lowercase ( ):
'''simple docstring'''
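    # Prefer CUDA, fall back to CPU, and let MPS override both when it is
    # available and built; MPS then gets the warning below.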
UpperCAmelCase : int = "cuda" if torch.cuda.is_available() else "cpu"
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
UpperCAmelCase : int = "mps"
if device == "mps":
print(
"WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
" errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
" with generations." )
return device
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : str = plt.imshow(__magic_name__ )
fig.axes.get_xaxis().set_visible(__magic_name__ )
fig.axes.get_yaxis().set_visible(__magic_name__ )
plt.show()
def lowercase ( ):
'''simple docstring'''
UpperCAmelCase : str = datetime.now()
UpperCAmelCase : Tuple = current_time.strftime("%H:%M:%S" )
return timestamp
| 311
| 1
|
'''simple docstring'''
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class UpperCamelCase__ ( yaml.SafeLoader ):
"""simple docstring"""
def A_ ( self , snake_case ):
'''simple docstring'''
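        # Reject YAML mappings that repeat a key, which yaml.SafeLoader would
        # otherwise silently collapse into a single entry.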
UpperCAmelCase : int = [self.constructed_objects[key_node] for key_node, _ in node.value]
UpperCAmelCase : Optional[Any] = [tuple(snake_case ) if isinstance(snake_case , snake_case ) else key for key in keys]
UpperCAmelCase : Any = Counter(snake_case )
UpperCAmelCase : Optional[int] = [key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}" )
def A_ ( self , snake_case , snake_case=False ):
'''simple docstring'''
UpperCAmelCase : Any = super().construct_mapping(snake_case , deep=snake_case )
self._check_no_duplicates_on_constructed_node(snake_case )
return mapping
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Dict = list(readme_content.splitlines() )
if full_content and full_content[0] == "---" and "---" in full_content[1:]:
UpperCAmelCase : List[str] = full_content[1:].index("---" ) + 1
UpperCAmelCase : List[str] = "\n".join(full_content[1:sep_idx] )
return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
return None, "\n".join(__magic_name__ )
class UpperCamelCase__ ( lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {"train_eval_index"} # train-eval-index in the YAML metadata
@classmethod
def A_ ( cls , snake_case ):
'''simple docstring'''
with open(snake_case , encoding="utf-8" ) as readme_file:
UpperCAmelCase , UpperCAmelCase : List[Any] = _split_yaml_from_readme(readme_file.read() )
if yaml_string is not None:
return cls.from_yaml_string(snake_case )
else:
return cls()
def A_ ( self , snake_case ):
'''simple docstring'''
if path.exists():
with open(snake_case , encoding="utf-8" ) as readme_file:
UpperCAmelCase : Union[str, Any] = readme_file.read()
else:
UpperCAmelCase : Dict = None
UpperCAmelCase : Dict = self._to_readme(snake_case )
with open(snake_case , "w" , encoding="utf-8" ) as readme_file:
readme_file.write(snake_case )
def A_ ( self , snake_case = None ):
'''simple docstring'''
if readme_content is not None:
UpperCAmelCase , UpperCAmelCase : Any = _split_yaml_from_readme(snake_case )
UpperCAmelCase : Optional[int] = "---\n" + self.to_yaml_string() + "---\n" + content
else:
UpperCAmelCase : Optional[int] = "---\n" + self.to_yaml_string() + "---\n"
return full_content
@classmethod
def A_ ( cls , snake_case ):
'''simple docstring'''
UpperCAmelCase : Tuple = yaml.load(snake_case , Loader=_NoDuplicateSafeLoader ) or {}
# Convert the YAML keys to DatasetMetadata fields
UpperCAmelCase : List[str] = {
(key.replace("-" , "_" ) if key.replace("-" , "_" ) in cls._FIELDS_WITH_DASHES else key): value
for key, value in metadata_dict.items()
}
return cls(**snake_case )
def A_ ( self ):
'''simple docstring'''
return yaml.safe_dump(
{
(key.replace("_" , "-" ) if key in self._FIELDS_WITH_DASHES else key): value
for key, value in self.items()
} , sort_keys=snake_case , allow_unicode=snake_case , encoding="utf-8" , ).decode("utf-8" )
a : Optional[int] = {
"image-classification": [],
"translation": [],
"image-segmentation": [],
"fill-mask": [],
"automatic-speech-recognition": [],
"token-classification": [],
"sentence-similarity": [],
"audio-classification": [],
"question-answering": [],
"summarization": [],
"zero-shot-classification": [],
"table-to-text": [],
"feature-extraction": [],
"other": [],
"multiple-choice": [],
"text-classification": [],
"text-to-image": [],
"text2text-generation": [],
"zero-shot-image-classification": [],
"tabular-classification": [],
"tabular-regression": [],
"image-to-image": [],
"tabular-to-text": [],
"unconditional-image-generation": [],
"text-retrieval": [],
"text-to-speech": [],
"object-detection": [],
"audio-to-audio": [],
"text-generation": [],
"conversational": [],
"table-question-answering": [],
"visual-question-answering": [],
"image-to-text": [],
"reinforcement-learning": [],
"voice-activity-detection": [],
"time-series-forecasting": [],
"document-question-answering": [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
a : Union[str, Any] = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
ap.add_argument("readme_filepath")
a : List[Any] = ap.parse_args()
a : Dict = Path(args.readme_filepath)
a : List[Any] = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| 311
|
'''simple docstring'''
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
a : str = getLogger(__name__)
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = 8 , __magic_name__ = 1024 , __magic_name__="val" , __magic_name__=None , __magic_name__=False , __magic_name__="summarization" , __magic_name__=None , __magic_name__=1 , __magic_name__ = None , __magic_name__="" , **__magic_name__ , ):
'''simple docstring'''
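    # Each rank shards the dataset, generates predictions for its shard, and
    # writes rank_{local_rank}_output.json; rank 0 later gathers and scores them.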
UpperCAmelCase : List[Any] = str(__magic_name__ )
assert local_rank is not None
torch.distributed.init_process_group(backend="nccl" , rank=__magic_name__ )
UpperCAmelCase : List[str] = Path(__magic_name__ )
UpperCAmelCase : Dict = save_dir.joinpath(F"rank_{local_rank}_output.json" )
torch.cuda.set_device(__magic_name__ )
UpperCAmelCase : Optional[Any] = AutoModelForSeqaSeqLM.from_pretrained(__magic_name__ ).cuda()
if fpaa:
UpperCAmelCase : int = model.half()
# determine if we need to increase num_beams
use_task_specific_params(__magic_name__ , __magic_name__ ) # update config with task specific params
UpperCAmelCase : Dict = generate_kwargs.pop("num_beams" , model.config.num_beams ) # AttributeError risk?
if num_return_sequences > num_beams:
UpperCAmelCase : Optional[Any] = num_return_sequences
UpperCAmelCase : Dict = AutoTokenizer.from_pretrained(__magic_name__ )
logger.info(F"Inferred tokenizer type: {tokenizer.__class__}" ) # if this is wrong, check config.model_type.
if max_source_length is None:
UpperCAmelCase : Any = tokenizer.model_max_length
if prefix is None:
UpperCAmelCase : Tuple = prefix or getattr(model.config , "prefix" , "" ) or ""
UpperCAmelCase : Dict = SeqaSeqDataset(
__magic_name__ , __magic_name__ , __magic_name__ , max_target_length=1024 , type_path=__magic_name__ , n_obs=__magic_name__ , prefix=__magic_name__ , **__magic_name__ , )
# I set shuffle=True for a more accurate progress bar.
# If all the longest samples are first, the prog bar estimate is too high at the beginning.
UpperCAmelCase : int = ds.make_sortish_sampler(__magic_name__ , distributed=__magic_name__ , add_extra_examples=__magic_name__ , shuffle=__magic_name__ )
UpperCAmelCase : List[Any] = DataLoader(__magic_name__ , sampler=__magic_name__ , batch_size=__magic_name__ , collate_fn=ds.collate_fn )
UpperCAmelCase : Any = []
for batch in tqdm(__magic_name__ ):
UpperCAmelCase : List[Any] = model.generate(
input_ids=batch["input_ids"].to(model.device ) , attention_mask=batch["attention_mask"].to(model.device ) , num_return_sequences=__magic_name__ , num_beams=__magic_name__ , **__magic_name__ , )
UpperCAmelCase : Optional[int] = tokenizer.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ , clean_up_tokenization_spaces=__magic_name__ )
UpperCAmelCase : int = batch["ids"]
if num_return_sequences > 1:
UpperCAmelCase : List[Any] = chunks(__magic_name__ , __magic_name__ ) # batch size chunks, each of size num_return_seq
for i, pred in enumerate(__magic_name__ ):
results.append({"pred": pred, "id": ids[i].item()} )
save_json(__magic_name__ , __magic_name__ )
return results, sampler.num_replicas
def lowercase ( ):
'''simple docstring'''
UpperCAmelCase : str = argparse.ArgumentParser(
epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate" )
parser.add_argument("--data_dir" , type=__magic_name__ , help="like cnn_dm/test.source" )
parser.add_argument(
"--model_name" , type=__magic_name__ , help="like facebook/bart-large-cnn,t5-base, etc." , default="sshleifer/distilbart-xsum-12-3" , )
parser.add_argument("--save_dir" , type=__magic_name__ , help="where to save" , default="tmp_gen" )
parser.add_argument("--max_source_length" , type=__magic_name__ , default=__magic_name__ )
parser.add_argument(
"--type_path" , type=__magic_name__ , default="test" , help="which subset to evaluate typically train/val/test" )
parser.add_argument("--task" , type=__magic_name__ , default="summarization" , help="used for task_specific_params + metrics" )
parser.add_argument("--bs" , type=__magic_name__ , default=8 , required=__magic_name__ , help="batch size" )
parser.add_argument(
"--local_rank" , type=__magic_name__ , default=-1 , required=__magic_name__ , help="should be passed by distributed.launch" )
parser.add_argument(
"--n_obs" , type=__magic_name__ , default=__magic_name__ , required=__magic_name__ , help="How many observations. Defaults to all." )
parser.add_argument(
"--num_return_sequences" , type=__magic_name__ , default=1 , required=__magic_name__ , help="How many sequences to return" )
parser.add_argument(
"--sync_timeout" , type=__magic_name__ , default=600 , required=__magic_name__ , help="How long should master process wait for other processes to finish." , )
parser.add_argument("--src_lang" , type=__magic_name__ , default=__magic_name__ , required=__magic_name__ )
parser.add_argument("--tgt_lang" , type=__magic_name__ , default=__magic_name__ , required=__magic_name__ )
parser.add_argument(
"--prefix" , type=__magic_name__ , required=__magic_name__ , default=__magic_name__ , help="will be added to the begininng of src examples" )
parser.add_argument("--fp16" , action="store_true" )
parser.add_argument("--debug" , action="store_true" )
UpperCAmelCase : Union[str, Any] = time.time()
UpperCAmelCase , UpperCAmelCase : Dict = parser.parse_known_args()
UpperCAmelCase : Tuple = parse_numeric_n_bool_cl_kwargs(__magic_name__ )
if generate_kwargs and args.local_rank <= 0:
print(F"parsed the following generate kwargs: {generate_kwargs}" )
UpperCAmelCase : Union[str, Any] = Path(args.save_dir + "_tmp" )
Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ ) # this handles locking.
UpperCAmelCase : List[Any] = list(json_save_dir.glob("rank_*.json" ) )
if intermediate_files:
raise ValueError(F"Found files at {json_save_dir} please move or remove them." )
# In theory, a node could finish and save before another node hits this. If this happens, we can address later.
UpperCAmelCase : Optional[Any] = {}
if args.src_lang is not None:
UpperCAmelCase : List[str] = args.src_lang
if args.tgt_lang is not None:
UpperCAmelCase : Dict = args.tgt_lang
Path(args.save_dir ).mkdir(exist_ok=__magic_name__ )
UpperCAmelCase , UpperCAmelCase : str = eval_data_dir(
args.data_dir , __magic_name__ , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=__magic_name__ , **__magic_name__ , )
if args.local_rank <= 0:
UpperCAmelCase : List[str] = Path(args.save_dir )
save_dir.mkdir(exist_ok=__magic_name__ )
UpperCAmelCase : str = gather_results_from_each_node(__magic_name__ , __magic_name__ , args.sync_timeout )
UpperCAmelCase : Dict = combine_partial_results(__magic_name__ )
if args.num_return_sequences > 1:
UpperCAmelCase : int = save_dir.joinpath("pseudolabel_results.json" )
print(F"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/" )
save_json(__magic_name__ , __magic_name__ )
return
UpperCAmelCase : Dict = Path(args.data_dir ).joinpath(args.type_path + ".target" )
with open(__magic_name__ ) as f:
UpperCAmelCase : Dict = [x.rstrip() for x in f.readlines()][: len(__magic_name__ )]
# Calculate metrics, save metrics, and save _generations.txt
UpperCAmelCase : Optional[int] = "translation" in args.task
UpperCAmelCase : str = calculate_bleu if calc_bleu else calculate_rouge
UpperCAmelCase : Tuple = "bleu" if calc_bleu else "rouge"
UpperCAmelCase : Dict = score_fn(__magic_name__ , __magic_name__ )
UpperCAmelCase : Any = len(__magic_name__ )
UpperCAmelCase : Union[str, Any] = time.time() - start_time
UpperCAmelCase : Dict = round(runtime / metrics["n_obs"] , 4 )
UpperCAmelCase : Optional[Any] = num_replicas
# TODO(@stas00): add whatever metadata to metrics
UpperCAmelCase : Dict = save_dir.joinpath(F"{args.type_path}_{metric_name}.json" )
save_json(__magic_name__ , __magic_name__ , indent=__magic_name__ )
print(__magic_name__ )
write_txt_file(__magic_name__ , save_dir.joinpath(F"{args.type_path}_generations.txt" ) )
if args.debug:
write_txt_file(__magic_name__ , save_dir.joinpath(F"{args.type_path}.target" ) )
else:
shutil.rmtree(__magic_name__ )
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Tuple = []
for partial_result in partial_results:
records.extend(__magic_name__ )
    UpperCAmelCase : Optional[Any] = sorted(__magic_name__ , key=lambda __magic_name__ : __magic_name__["id"] )
UpperCAmelCase : List[Any] = [x["pred"] for x in records]
return preds
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Dict = time.time()
logger.info("waiting for all nodes to finish" )
UpperCAmelCase : Union[str, Any] = None
while (time.time() - start_wait) < timeout:
UpperCAmelCase : Dict = list(save_dir.glob("rank_*.json" ) )
if len(__magic_name__ ) < num_replicas:
continue
try:
# make sure all json files are fully saved
UpperCAmelCase : List[str] = lmap(__magic_name__ , __magic_name__ )
return json_data
except JSONDecodeError:
continue
else:
raise TimeoutError("Rank 0 gave up on waiting for other processes" )
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
| 311
| 1
|
'''simple docstring'''
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros
def lowercase ( __magic_name__ , __magic_name__ ):
'''simple docstring'''
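    # Samples g(x, y) = exp(-(x**2 + y**2) / (2 * sigma**2)) / (2 * pi * sigma)
    # on a k_size x k_size grid centred on the kernel midpoint. (The textbook
    # normalisation uses sigma**2 in the prefactor, but a constant scale does
    # not change the filter's shape.)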
UpperCAmelCase : Any = k_size // 2
UpperCAmelCase , UpperCAmelCase : List[str] = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
UpperCAmelCase : List[str] = 1 / (2 * pi * sigma) * exp(-(square(__magic_name__ ) + square(__magic_name__ )) / (2 * square(__magic_name__ )) )
return g
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = image.shape[0], image.shape[1]
# dst image height and width
UpperCAmelCase : Any = height - k_size + 1
UpperCAmelCase : str = width - k_size + 1
# im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
UpperCAmelCase : Tuple = zeros((dst_height * dst_width, k_size * k_size) )
UpperCAmelCase : str = 0
for i, j in product(range(__magic_name__ ) , range(__magic_name__ ) ):
UpperCAmelCase : List[Any] = ravel(image[i : i + k_size, j : j + k_size] )
UpperCAmelCase : List[str] = window
row += 1
# turn the kernel into shape(k*k, 1)
UpperCAmelCase : Dict = gen_gaussian_kernel(__magic_name__ , __magic_name__ )
UpperCAmelCase : str = ravel(__magic_name__ )
# reshape and get the dst image
UpperCAmelCase : Any = dot(__magic_name__ , __magic_name__ ).reshape(__magic_name__ , __magic_name__ ).astype(__magic_name__ )
return dst
if __name__ == "__main__":
# read original image
a : Any = imread(R"../image_data/lena.jpg")
# turn image in gray scale value
a : Dict = cvtColor(img, COLOR_BGR2GRAY)
# get values with two different mask size
a : Dict = gaussian_filter(gray, 3, sigma=1)
a : Optional[Any] = gaussian_filter(gray, 5, sigma=0.8)
# show result images
imshow("gaussian filter with 3x3 mask", gaussianaxa)
imshow("gaussian filter with 5x5 mask", gaussianaxa)
waitKey()
| 311
|
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
a : List[str] = logging.get_logger(__name__)
a : Optional[Any] = ["model.decoder.embed_positions.weights"]
def lowercase ( __magic_name__ ):
'''simple docstring'''
if "emb" in name:
UpperCAmelCase : str = name.replace("emb" , "model.decoder.embed_tokens" )
if "transformer" in name:
UpperCAmelCase : List[str] = name.replace("transformer" , "model.decoder" )
if "cross_attention" in name:
UpperCAmelCase : int = name.replace("cross_attention" , "encoder_attn" )
if "linear1" in name:
UpperCAmelCase : List[Any] = name.replace("linear1" , "fc1" )
if "linear2" in name:
UpperCAmelCase : int = name.replace("linear2" , "fc2" )
if "norm1" in name:
UpperCAmelCase : Dict = name.replace("norm1" , "self_attn_layer_norm" )
if "norm_cross" in name:
UpperCAmelCase : Any = name.replace("norm_cross" , "encoder_attn_layer_norm" )
if "norm2" in name:
UpperCAmelCase : Union[str, Any] = name.replace("norm2" , "final_layer_norm" )
if "out_norm" in name:
UpperCAmelCase : Dict = name.replace("out_norm" , "model.decoder.layer_norm" )
if "linears" in name:
UpperCAmelCase : List[Any] = name.replace("linears" , "lm_heads" )
if "condition_provider.conditioners.description.output_proj" in name:
UpperCAmelCase : Any = name.replace("condition_provider.conditioners.description.output_proj" , "enc_to_dec_proj" )
return name
def lowercase ( __magic_name__ , __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Any = list(state_dict.keys() )
UpperCAmelCase : List[Any] = {}
for key in keys:
UpperCAmelCase : Any = state_dict.pop(__magic_name__ )
UpperCAmelCase : str = rename_keys(__magic_name__ )
if "in_proj_weight" in key:
# split fused qkv proj
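            # rows [0, h) -> q_proj, [h, 2h) -> k_proj, [2h, 3h) -> v_proj,
            # where h = hidden_size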
UpperCAmelCase : Optional[int] = val[:hidden_size, :]
UpperCAmelCase : Optional[Any] = val[hidden_size : 2 * hidden_size, :]
UpperCAmelCase : Optional[Any] = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
UpperCAmelCase : str = val
else:
UpperCAmelCase : int = val
return state_dict, enc_dec_proj_state_dict
def lowercase ( __magic_name__ ):
'''simple docstring'''
if checkpoint == "small":
# default config values
UpperCAmelCase : List[Any] = 1024
UpperCAmelCase : Tuple = 24
UpperCAmelCase : Union[str, Any] = 16
elif checkpoint == "medium":
UpperCAmelCase : List[Any] = 1536
UpperCAmelCase : Optional[Any] = 48
UpperCAmelCase : List[str] = 24
elif checkpoint == "large":
UpperCAmelCase : List[Any] = 2048
UpperCAmelCase : str = 48
UpperCAmelCase : Optional[Any] = 32
else:
raise ValueError(F"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}." )
UpperCAmelCase : Tuple = MusicgenDecoderConfig(
hidden_size=__magic_name__ , ffn_dim=hidden_size * 4 , num_hidden_layers=__magic_name__ , num_attention_heads=__magic_name__ , )
return config
@torch.no_grad()
def lowercase ( __magic_name__ , __magic_name__=None , __magic_name__=None , __magic_name__="cpu" ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = MusicGen.get_pretrained(__magic_name__ , device=__magic_name__ )
UpperCAmelCase : List[str] = decoder_config_from_checkpoint(__magic_name__ )
UpperCAmelCase : Dict = fairseq_model.lm.state_dict()
UpperCAmelCase , UpperCAmelCase : List[str] = rename_state_dict(
__magic_name__ , hidden_size=decoder_config.hidden_size )
UpperCAmelCase : Any = TaEncoderModel.from_pretrained("t5-base" )
UpperCAmelCase : Any = EncodecModel.from_pretrained("facebook/encodec_32khz" )
UpperCAmelCase : int = MusicgenForCausalLM(__magic_name__ ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
UpperCAmelCase , UpperCAmelCase : Optional[int] = decoder.load_state_dict(__magic_name__ , strict=__magic_name__ )
for key in missing_keys.copy():
if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(__magic_name__ )
if len(__magic_name__ ) > 0:
raise ValueError(F"Missing key(s) in state_dict: {missing_keys}" )
if len(__magic_name__ ) > 0:
raise ValueError(F"Unexpected key(s) in state_dict: {unexpected_keys}" )
# init the composite model
UpperCAmelCase : List[Any] = MusicgenForConditionalGeneration(text_encoder=__magic_name__ , audio_encoder=__magic_name__ , decoder=__magic_name__ )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(__magic_name__ )
# check we can do a forward pass
UpperCAmelCase : Union[str, Any] = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
UpperCAmelCase : Optional[Any] = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
UpperCAmelCase : str = model(input_ids=__magic_name__ , decoder_input_ids=__magic_name__ ).logits
if logits.shape != (8, 1, 2048):
raise ValueError("Incorrect shape for logits" )
# now construct the processor
UpperCAmelCase : Dict = AutoTokenizer.from_pretrained("t5-base" )
UpperCAmelCase : List[str] = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" , padding_side="left" )
UpperCAmelCase : Dict = MusicgenProcessor(feature_extractor=__magic_name__ , tokenizer=__magic_name__ )
# set the appropriate bos/pad token ids
UpperCAmelCase : List[Any] = 2048
UpperCAmelCase : Tuple = 2048
# set other default generation config params
UpperCAmelCase : Tuple = int(30 * audio_encoder.config.frame_rate )
UpperCAmelCase : str = True
UpperCAmelCase : Tuple = 3.0
if pytorch_dump_folder is not None:
Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ )
logger.info(F"Saving model {checkpoint} to {pytorch_dump_folder}" )
model.save_pretrained(__magic_name__ )
processor.save_pretrained(__magic_name__ )
if repo_id:
logger.info(F"Pushing model {checkpoint} to {repo_id}" )
model.push_to_hub(__magic_name__ )
processor.push_to_hub(__magic_name__ )
if __name__ == "__main__":
a : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
a : int = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 311
| 1
|
'''simple docstring'''
from __future__ import annotations
from collections import deque
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self , snake_case ):
'''simple docstring'''
UpperCAmelCase : list[dict] = []
self.adlist.append(
{"value": "", "next_states": [], "fail_state": 0, "output": []} )
for keyword in keywords:
self.add_keyword(snake_case )
self.set_fail_transitions()
def A_ ( self , snake_case , snake_case ):
'''simple docstring'''
for state in self.adlist[current_state]["next_states"]:
if char == self.adlist[state]["value"]:
return state
return None
def A_ ( self , snake_case ):
'''simple docstring'''
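        # Walk the trie one character at a time, creating missing nodes; the
        # final node records the full keyword in its output list.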
UpperCAmelCase : str = 0
for character in keyword:
UpperCAmelCase : List[Any] = self.find_next_state(snake_case , snake_case )
if next_state is None:
self.adlist.append(
{
"value": character,
"next_states": [],
"fail_state": 0,
"output": [],
} )
self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
UpperCAmelCase : int = len(self.adlist ) - 1
else:
UpperCAmelCase : Optional[Any] = next_state
self.adlist[current_state]["output"].append(snake_case )
def A_ ( self ):
'''simple docstring'''
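        # BFS over the trie to compute Aho-Corasick failure links: a node's
        # fail state is the longest proper suffix of its path that is also a
        # path in the trie, and outputs are merged along the failure chain.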
UpperCAmelCase : deque = deque()
for node in self.adlist[0]["next_states"]:
q.append(snake_case )
UpperCAmelCase : List[str] = 0
while q:
UpperCAmelCase : List[Any] = q.popleft()
for child in self.adlist[r]["next_states"]:
q.append(snake_case )
UpperCAmelCase : Optional[int] = self.adlist[r]["fail_state"]
while (
self.find_next_state(snake_case , self.adlist[child]["value"] ) is None
and state != 0
):
UpperCAmelCase : Tuple = self.adlist[state]["fail_state"]
UpperCAmelCase : Union[str, Any] = self.find_next_state(
snake_case , self.adlist[child]["value"] )
if self.adlist[child]["fail_state"] is None:
UpperCAmelCase : Dict = 0
UpperCAmelCase : Dict = (
self.adlist[child]["output"]
+ self.adlist[self.adlist[child]["fail_state"]]["output"]
)
def A_ ( self , snake_case ):
'''simple docstring'''
UpperCAmelCase : dict = {} # returns a dict with keywords and list of its occurrences
UpperCAmelCase : List[Any] = 0
for i in range(len(snake_case ) ):
while (
self.find_next_state(snake_case , string[i] ) is None
and current_state != 0
):
UpperCAmelCase : Optional[Any] = self.adlist[current_state]["fail_state"]
UpperCAmelCase : Optional[Any] = self.find_next_state(snake_case , string[i] )
if next_state is None:
UpperCAmelCase : List[Any] = 0
else:
UpperCAmelCase : Optional[Any] = next_state
for key in self.adlist[current_state]["output"]:
if key not in result:
UpperCAmelCase : List[Any] = []
result[key].append(i - len(snake_case ) + 1 )
return result
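# Hypothetical usage sketch (identifiers in this dump are mangled, so
# `search_in` stands in for the last A_ method defined above):
#   automaton = UpperCamelCase__(["he", "she", "his"])
#   automaton.search_in("ushers")  # -> {"she": [1], "he": [2]}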
if __name__ == "__main__":
import doctest
doctest.testmod()
| 311
|
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : List[str] = inspect.getfile(accelerate.test_utils )
UpperCAmelCase : Tuple = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] )
UpperCAmelCase : Optional[int] = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ["scripts", "test_distributed_data_loop.py"] )
UpperCAmelCase : Tuple = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_ops.py"] )
@require_multi_gpu
def A_ ( self ):
'''simple docstring'''
print(f"Found {torch.cuda.device_count()} devices." )
UpperCAmelCase : Any = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(snake_case , env=os.environ.copy() )
@require_multi_gpu
def A_ ( self ):
'''simple docstring'''
print(f"Found {torch.cuda.device_count()} devices." )
UpperCAmelCase : Tuple = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
print(f"Command: {cmd}" )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(snake_case , env=os.environ.copy() )
@require_multi_gpu
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(snake_case , env=os.environ.copy() )
@require_multi_gpu
def A_ ( self ):
'''simple docstring'''
print(f"Found {torch.cuda.device_count()} devices, using 2 devices only" )
UpperCAmelCase : str = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices="0,1" ):
execute_subprocess_async(snake_case , env=os.environ.copy() )
if __name__ == "__main__":
a : Union[str, Any] = Accelerator()
a : str = (accelerator.state.process_index + 2, 10)
a : List[str] = torch.randint(0, 10, shape).to(accelerator.device)
a : Optional[int] = ""
a : int = accelerator.pad_across_processes(tensor)
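# pad_across_processes right-pads (or left-pads with pad_first=True) each
# process's tensor with zeros up to the longest first dimension across ranks.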
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
a : List[Any] = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
a : List[str] = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 311
| 1
|
'''simple docstring'''
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
a : Dict = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
layers.ConvaD(32, (3, 3), input_shape=(64, 64, 3), activation="relu")
)
# Step 2 - Pooling
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.ConvaD(32, (3, 3), activation="relu"))
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=1_28, activation="relu"))
classifier.add(layers.Dense(units=1, activation="sigmoid"))
# Compiling the CNN
classifier.compile(
optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
a : int = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 2_55, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
a : List[Any] = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 2_55)
a : List[str] = train_datagen.flow_from_directory(
"dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
)
a : List[Any] = test_datagen.flow_from_directory(
"dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
)
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
)
classifier.save("cnn.h5")
# Part 3 - Making new predictions
a : Any = tf.keras.preprocessing.image.load_img(
"dataset/single_prediction/image.png", target_size=(64, 64)
)
a : Any = tf.keras.preprocessing.image.img_to_array(test_image)
a : List[str] = np.expand_dims(test_image, axis=0)
a : int = classifier.predict(test_image)
# training_set.class_indices
if result[0][0] == 0:
a : Dict = "Normal"
if result[0][0] == 1:
a : str = "Abnormality detected"
| 311
|
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class UpperCamelCase__ :
"""simple docstring"""
@staticmethod
def A_ ( *snake_case , **snake_case ):
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def A_ ( self , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : str = pipeline(
"zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" )
UpperCAmelCase : Union[str, Any] = [
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
]
return object_detector, examples
def A_ ( self , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : List[Any] = object_detector(examples[0] , threshold=0.0 )
UpperCAmelCase : Dict = len(snake_case )
self.assertGreater(snake_case , 0 )
self.assertEqual(
snake_case , [
{
"score": ANY(snake_case ),
"label": ANY(snake_case ),
"box": {"xmin": ANY(snake_case ), "ymin": ANY(snake_case ), "xmax": ANY(snake_case ), "ymax": ANY(snake_case )},
}
for i in range(snake_case )
] , )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
def A_ ( self ):
'''simple docstring'''
pass
@require_torch
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = pipeline(
"zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" )
UpperCAmelCase : Optional[Any] = object_detector(
"./tests/fixtures/tests_samples/COCO/000000039769.png" , candidate_labels=["cat", "remote", "couch"] , threshold=0.64 , )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
{"score": 0.7235, "label": "cat", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.7218, "label": "remote", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.7184, "label": "couch", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.6748, "label": "remote", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6656, "label": "cat", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6614, "label": "couch", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6456, "label": "remote", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
{"score": 0.642, "label": "remote", "box": {"xmin": 6_7, "ymin": 2_7_4, "xmax": 9_3, "ymax": 2_9_7}},
{"score": 0.6419, "label": "cat", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
] , )
UpperCAmelCase : Tuple = object_detector(
[
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
[
{"score": 0.7235, "label": "cat", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.7218, "label": "remote", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.7184, "label": "couch", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.6748, "label": "remote", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6656, "label": "cat", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6614, "label": "couch", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6456, "label": "remote", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
{"score": 0.642, "label": "remote", "box": {"xmin": 6_7, "ymin": 2_7_4, "xmax": 9_3, "ymax": 2_9_7}},
{"score": 0.6419, "label": "cat", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
]
] , )
@require_torch
@slow
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Tuple = pipeline("zero-shot-object-detection" )
UpperCAmelCase : Optional[int] = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}},
] , )
UpperCAmelCase : Union[str, Any] = object_detector(
[
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
] , )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
[
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}},
],
[
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}},
],
] , )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
def A_ ( self ):
'''simple docstring'''
pass
@require_torch
@slow
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Any = 0.2
UpperCAmelCase : Union[str, Any] = pipeline("zero-shot-object-detection" )
UpperCAmelCase : str = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , threshold=snake_case , )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
] , )
@require_torch
@slow
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Dict = 2
UpperCAmelCase : Optional[Any] = pipeline("zero-shot-object-detection" )
UpperCAmelCase : List[str] = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , top_k=snake_case , )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
] , )
| 311
| 1
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
a : Dict = None
a : int = logging.get_logger(__name__)
a : Tuple = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
a : Union[str, Any] = {
"vocab_file": {
"google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
"google/bigbird-roberta-large": (
"https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
),
"google/bigbird-base-trivia-itc": (
"https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
),
},
"tokenizer_file": {
"google/bigbird-roberta-base": (
"https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"
),
"google/bigbird-roberta-large": (
"https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"
),
"google/bigbird-base-trivia-itc": (
"https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"
),
},
}
a : Optional[Any] = {
"google/bigbird-roberta-base": 40_96,
"google/bigbird-roberta-large": 40_96,
"google/bigbird-base-trivia-itc": 40_96,
}
a : str = "▁"
class UpperCamelCase__ ( lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ : Tuple = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ : Optional[int] = BigBirdTokenizer
SCREAMING_SNAKE_CASE__ : str = ["input_ids", "attention_mask"]
SCREAMING_SNAKE_CASE__ : List[int] = []
def __init__( self , snake_case=None , snake_case=None , snake_case="<unk>" , snake_case="<s>" , snake_case="</s>" , snake_case="<pad>" , snake_case="[SEP]" , snake_case="[MASK]" , snake_case="[CLS]" , **snake_case , ):
'''simple docstring'''
UpperCAmelCase : int = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else bos_token
UpperCAmelCase : List[str] = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else eos_token
UpperCAmelCase : str = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else unk_token
UpperCAmelCase : List[str] = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else pad_token
UpperCAmelCase : Dict = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else cls_token
UpperCAmelCase : Tuple = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase : Dict = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else mask_token
super().__init__(
snake_case , tokenizer_file=snake_case , bos_token=snake_case , eos_token=snake_case , unk_token=snake_case , sep_token=snake_case , pad_token=snake_case , cls_token=snake_case , mask_token=snake_case , **snake_case , )
UpperCAmelCase : int = vocab_file
UpperCAmelCase : Optional[int] = False if not self.vocab_file else True
def A_ ( self , snake_case , snake_case = None ):
'''simple docstring'''
UpperCAmelCase : str = [self.sep_token_id]
UpperCAmelCase : Dict = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def A_ ( self , snake_case , snake_case = None , snake_case = False ):
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(snake_case )) + [1]
return [1] + ([0] * len(snake_case )) + [1] + ([0] * len(snake_case )) + [1]
def A_ ( self , snake_case , snake_case = None ):
'''simple docstring'''
UpperCAmelCase : Tuple = [self.sep_token_id]
UpperCAmelCase : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def A_ ( self , snake_case , snake_case = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(snake_case ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
UpperCAmelCase : Optional[Any] = os.path.join(
snake_case , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case ):
copyfile(self.vocab_file , snake_case )
return (out_vocab_file,)
| 311
|
'''simple docstring'''
def twos_complement(number: int) -> str:
    """Return the two's complement of a non-positive integer as a binary string."""
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 311
| 1
|
'''simple docstring'''
def twos_complement(number: int) -> str:
    """Return the two's complement of a non-positive integer as a binary string."""
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 311
|
'''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(point_a, point_b):
    """Euclidean distance between two points given as sequences of numbers."""
    return np.linalg.norm(np.array(point_a) - np.array(point_b))
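# Quick check: euclidean_distance([0, 0], [3, 4]) == 5.0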
def classifier(train_data, train_target, classes, point, k=5):
    """Classify *point* by majority vote among its k nearest training points."""
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 311
| 1
|
'''simple docstring'''
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def lowercase ( __magic_name__ = "laptop" ):
'''simple docstring'''
UpperCAmelCase : Tuple = F"https://www.amazon.in/laptop/s?k={product}"
UpperCAmelCase : int = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
"Accept-Language": "en-US, en;q=0.5",
}
UpperCAmelCase : List[str] = BeautifulSoup(requests.get(__magic_name__ , headers=__magic_name__ ).text )
# Initialize a Pandas dataframe with the column titles
UpperCAmelCase : Any = DataFrame(
columns=[
"Product Title",
"Product Link",
"Current Price of the product",
"Product Rating",
"MRP of the product",
"Discount",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"div" , attrs={"class": "s-result-item", "data-component-type": "s-search-result"} , ) , soup.find_all("div" , attrs={"class": "a-row a-size-base a-color-base"} ) , ):
try:
UpperCAmelCase : int = item.ha.text
UpperCAmelCase : int = "https://www.amazon.in/" + item.ha.a["href"]
UpperCAmelCase : str = item.find("span" , attrs={"class": "a-offscreen"} ).text
try:
UpperCAmelCase : str = item.find("span" , attrs={"class": "a-icon-alt"} ).text
except AttributeError:
UpperCAmelCase : Union[str, Any] = "Not available"
try:
UpperCAmelCase : Dict = (
"₹"
+ item.find(
"span" , attrs={"class": "a-price a-text-price"} ).text.split("₹" )[1]
)
except AttributeError:
UpperCAmelCase : int = ""
try:
UpperCAmelCase : Tuple = float(
(
(
float(product_mrp.strip("₹" ).replace("," , "" ) )
- float(product_price.strip("₹" ).replace("," , "" ) )
)
/ float(product_mrp.strip("₹" ).replace("," , "" ) )
)
* 100 )
except ValueError:
UpperCAmelCase : Union[str, Any] = float("nan" )
except AttributeError:
pass
UpperCAmelCase : List[str] = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
UpperCAmelCase : List[str] = " "
UpperCAmelCase : Optional[int] = " "
data_frame.index += 1
return data_frame
if __name__ == "__main__":
a : Optional[Any] = "headphones"
get_amazon_product_data(product).to_csv(F'Amazon Product Data for {product}.csv')
| 311
|
'''simple docstring'''
def is_power_of_two(number: int) -> bool:
    """Return True if *number* is a power of two (0 is treated as True here)."""
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 311
| 1
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    """A polynomial in one indeterminate; coefficients[i] is the coefficient of x**i."""

    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )
        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_2: Polynomial) -> Polynomial:
        if self.degree > polynomial_2.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_2.degree + 1):
                coefficients[i] += polynomial_2.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_2.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_2.degree, coefficients)

    def __sub__(self, polynomial_2: Polynomial) -> Polynomial:
        return self + polynomial_2 * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_2: Polynomial) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + polynomial_2.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_2.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_2.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_2.degree, coefficients)

    def evaluate(self, substitution: float) -> float:
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)
        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: float = 0) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_2: object) -> bool:
        if not isinstance(polynomial_2, Polynomial):
            return False
        if self.degree != polynomial_2.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_2.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_2: object) -> bool:
        return not self.__eq__(polynomial_2)
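# A minimal usage sketch (hand-checked, not part of the original module):
# coefficients[i] holds the coefficient of x**i, so Polynomial(2, [1, 2, 3])
# is 3x^2 + 2x + 1.
if __name__ == "__main__":
    poly = Polynomial(2, [1, 2, 3])
    print(poly)               # 3x^2 + 2x + 1
    print(poly.evaluate(1))   # 6
    print(poly.derivative())  # 6x + 2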
| 311
|
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def lowercase ( __magic_name__ , __magic_name__=10 ):
'''simple docstring'''
UpperCAmelCase : Tuple = []
for _ in range(__magic_name__ ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def lowercase ( __magic_name__ , __magic_name__=10 ):
'''simple docstring'''
UpperCAmelCase : List[str] = []
for step in range(__magic_name__ ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase : Any = os.path.join(__magic_name__ , "schedule.bin" )
torch.save(scheduler.state_dict() , __magic_name__ )
UpperCAmelCase : Any = torch.load(__magic_name__ )
scheduler.load_state_dict(__magic_name__ )
return lrs
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self , snake_case , snake_case , snake_case ):
'''simple docstring'''
self.assertEqual(len(snake_case ) , len(snake_case ) )
for a, b in zip(snake_case , snake_case ):
self.assertAlmostEqual(snake_case , snake_case , delta=snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Dict = torch.tensor([0.1, -0.2, -0.1] , requires_grad=snake_case )
UpperCAmelCase : Any = torch.tensor([0.4, 0.2, -0.5] )
UpperCAmelCase : Any = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
UpperCAmelCase : List[str] = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 )
for _ in range(1_0_0 ):
UpperCAmelCase : List[Any] = criterion(snake_case , snake_case )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Tuple = torch.tensor([0.1, -0.2, -0.1] , requires_grad=snake_case )
UpperCAmelCase : int = torch.tensor([0.4, 0.2, -0.5] )
UpperCAmelCase : str = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
UpperCAmelCase : str = Adafactor(
params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=snake_case , weight_decay=0.0 , relative_step=snake_case , scale_parameter=snake_case , warmup_init=snake_case , )
for _ in range(1_0_0_0 ):
UpperCAmelCase : str = criterion(snake_case , snake_case )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = nn.Linear(50 , 50 ) if is_torch_available() else None
SCREAMING_SNAKE_CASE__ : List[Any] = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None
SCREAMING_SNAKE_CASE__ : Optional[int] = 10
def A_ ( self , snake_case , snake_case , snake_case , snake_case=None ):
'''simple docstring'''
self.assertEqual(len(snake_case ) , len(snake_case ) )
for a, b in zip(snake_case , snake_case ):
self.assertAlmostEqual(snake_case , snake_case , delta=snake_case , msg=snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : int = {"num_warmup_steps": 2, "num_training_steps": 1_0}
# schedulers doct format
# function: (sched_args_dict, expected_learning_rates)
UpperCAmelCase : int = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"num_warmup_steps": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, "num_cycles": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, "power": 2.0, "lr_end": 1e-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"num_warmup_steps": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
for scheduler_func, data in scheds.items():
UpperCAmelCase , UpperCAmelCase : Any = data
UpperCAmelCase : Tuple = scheduler_func(self.optimizer , **snake_case )
self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
UpperCAmelCase : List[str] = unwrap_schedule(snake_case , self.num_steps )
self.assertListAlmostEqual(
snake_case , snake_case , tol=1e-2 , msg=f"failed for {scheduler_func} in normal scheduler" , )
UpperCAmelCase : Optional[Any] = scheduler_func(self.optimizer , **snake_case )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(snake_case ) # wrap to test picklability of the schedule
UpperCAmelCase : Tuple = unwrap_and_save_reload_schedule(snake_case , self.num_steps )
self.assertListEqual(snake_case , snake_case , msg=f"failed for {scheduler_func} in save and reload" )
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self , snake_case ):
'''simple docstring'''
UpperCAmelCase : List[str] = fn
def __call__( self , *snake_case , **snake_case ):
'''simple docstring'''
return self.fn(*snake_case , **snake_case )
@classmethod
def A_ ( self , snake_case ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = list(map(self , scheduler.lr_lambdas ) )
| 311
| 1
|
'''simple docstring'''
import numpy
class TwoHiddenLayerNeuralNetwork:
    """A feedforward network with two hidden layers, trained by backpropagation."""

    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        self.input_array = input_array
        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.
        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )
        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(
            4, 3
        )
        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)
        # Real output values provided.
        self.output_array = output_array
        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)

    def feedforward(self) -> numpy.ndarray:
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )
        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return self.layer_between_second_hidden_layer_and_output

    def back_propagation(self) -> None:
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(
                self.layer_between_first_hidden_layer_and_second_hidden_layer
            ),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(
                    self.layer_between_first_hidden_layer_and_second_hidden_layer
                ),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )
        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )

    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")

    def predict(self, input_arr: numpy.ndarray) -> int:
        # Input values for which the prediction is to be made.
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return int(self.layer_between_second_hidden_layer_and_output > 0.6)
def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    """Logistic sigmoid activation."""
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    """Derivative of the sigmoid, expressed in terms of the activated value."""
    return (value) * (1 - (value))
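# Note: sigmoid_derivative expects the *activated* value sigmoid(x), not x:
# since d/dx sigmoid(x) = sigmoid(x) * (1 - sigmoid(x)), passing the layer
# activations computed in feedforward() is what makes back_propagation valid.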
def example() -> int:
    """Train on the 3-input truth table below and predict one input vector."""
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )
    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)
    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(
        input_array=test_input, output_array=output
    )
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))
if __name__ == "__main__":
example()
| 311
|
'''simple docstring'''
import jax.numpy as jnp
from ...utils import logging
from ..t5.modeling_flax_t5 import FlaxT5EncoderModel, FlaxT5ForConditionalGeneration, FlaxT5Model
from .configuration_mt5 import MT5Config
logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"
def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
    """Shift input ids one token to the right and prepend the decoder start token."""
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)
    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids
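# Worked example (made-up values): with pad_token_id=0 and
# decoder_start_token_id=2, input_ids [[5, 6, 7]] becomes [[2, 5, 6]];
# any -100 placeholders that get shifted in are replaced by the pad id.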
class FlaxMT5Model(FlaxT5Model):
    """MT5 variant of the Flax T5 model."""

    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5EncoderModel(FlaxT5EncoderModel):
    """MT5 variant of the Flax T5 encoder-only model."""

    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5ForConditionalGeneration(FlaxT5ForConditionalGeneration):
    """MT5 variant of the Flax T5 conditional-generation model."""

    model_type = "mt5"
    config_class = MT5Config
| 311
| 1
|
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class UpperCamelCase__ ( lowercase__ ):
"""simple docstring"""
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : str = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(snake_case , "hidden_sizes" ) )
self.parent.assertTrue(hasattr(snake_case , "neck_hidden_sizes" ) )
self.parent.assertTrue(hasattr(snake_case , "num_attention_heads" ) )
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self , snake_case , snake_case=1_3 , snake_case=3_2 , snake_case=2 , snake_case=3 , snake_case=6_4_0 , snake_case=4 , snake_case="silu" , snake_case=3 , snake_case=3_2 , snake_case=0.1 , snake_case=0.1 , snake_case=0.1 , snake_case=0.02 , snake_case=True , snake_case=True , snake_case=1_0 , snake_case=None , ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = parent
UpperCAmelCase : Dict = batch_size
UpperCAmelCase : Any = image_size
UpperCAmelCase : Union[str, Any] = patch_size
UpperCAmelCase : Union[str, Any] = num_channels
UpperCAmelCase : Any = last_hidden_size
UpperCAmelCase : Optional[Any] = num_attention_heads
UpperCAmelCase : Any = hidden_act
UpperCAmelCase : str = conv_kernel_size
UpperCAmelCase : str = output_stride
UpperCAmelCase : str = hidden_dropout_prob
UpperCAmelCase : List[str] = attention_probs_dropout_prob
UpperCAmelCase : List[str] = classifier_dropout_prob
UpperCAmelCase : List[Any] = use_labels
UpperCAmelCase : Optional[int] = is_training
UpperCAmelCase : int = num_labels
UpperCAmelCase : Dict = initializer_range
UpperCAmelCase : int = scope
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase : List[str] = None
UpperCAmelCase : List[str] = None
if self.use_labels:
UpperCAmelCase : str = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCAmelCase : Tuple = self.get_config()
return config, pixel_values, labels, pixel_labels
def A_ ( self ):
'''simple docstring'''
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def A_ ( self , snake_case , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = MobileViTModel(config=snake_case )
model.to(snake_case )
model.eval()
UpperCAmelCase : Dict = model(snake_case )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def A_ ( self , snake_case , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : Dict = self.num_labels
UpperCAmelCase : List[Any] = MobileViTForImageClassification(snake_case )
model.to(snake_case )
model.eval()
UpperCAmelCase : List[Any] = model(snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A_ ( self , snake_case , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : List[Any] = self.num_labels
UpperCAmelCase : Dict = MobileViTForSemanticSegmentation(snake_case )
model.to(snake_case )
model.eval()
UpperCAmelCase : Tuple = model(snake_case )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
UpperCAmelCase : Union[str, Any] = model(snake_case , labels=snake_case )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = config_and_inputs
UpperCAmelCase : Dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase__ ( lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE__ : Tuple = (
{
"feature-extraction": MobileViTModel,
"image-classification": MobileViTForImageClassification,
"image-segmentation": MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = False
SCREAMING_SNAKE_CASE__ : Optional[int] = False
SCREAMING_SNAKE_CASE__ : List[Any] = False
SCREAMING_SNAKE_CASE__ : Optional[Any] = False
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = MobileViTModelTester(self )
UpperCAmelCase : List[str] = MobileViTConfigTester(self , config_class=snake_case , has_text_modality=snake_case )
def A_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileViT does not use inputs_embeds" )
def A_ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason="MobileViT does not support input and output embeddings" )
def A_ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason="MobileViT does not output attentions" )
def A_ ( self ):
'''simple docstring'''
pass
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : Tuple = model_class(snake_case )
UpperCAmelCase : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : str = [*signature.parameters.keys()]
UpperCAmelCase : Optional[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , snake_case )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def A_ ( self ):
'''simple docstring'''
pass
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def A_ ( self ):
'''simple docstring'''
def check_hidden_states_output(snake_case , snake_case , snake_case ):
UpperCAmelCase : Tuple = model_class(snake_case )
model.to(snake_case )
model.eval()
with torch.no_grad():
UpperCAmelCase : Any = model(**self._prepare_for_class(snake_case , snake_case ) )
UpperCAmelCase : Dict = outputs.hidden_states
UpperCAmelCase : Optional[Any] = 5
self.assertEqual(len(snake_case ) , snake_case )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
UpperCAmelCase : Any = 2
for i in range(len(snake_case ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
UpperCAmelCase , UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : Union[str, Any] = True
check_hidden_states_output(snake_case , snake_case , snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase : Any = True
check_hidden_states_output(snake_case , snake_case , snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*snake_case )
@slow
def A_ ( self ):
'''simple docstring'''
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : Dict = MobileViTModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
def lowercase ( ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def A_ ( self ):
'''simple docstring'''
return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small" ) if is_vision_available() else None
@slow
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Dict = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small" ).to(snake_case )
UpperCAmelCase : Union[str, Any] = self.default_image_processor
UpperCAmelCase : List[str] = prepare_img()
UpperCAmelCase : List[Any] = image_processor(images=snake_case , return_tensors="pt" ).to(snake_case )
# forward pass
with torch.no_grad():
UpperCAmelCase : Dict = model(**snake_case )
# verify the logits
UpperCAmelCase : str = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , snake_case )
UpperCAmelCase : Dict = torch.tensor([-1.9364, -1.2327, -0.4653] ).to(snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case , atol=1e-4 ) )
@slow
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Any = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
UpperCAmelCase : Optional[Any] = model.to(snake_case )
UpperCAmelCase : Optional[Any] = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
UpperCAmelCase : Any = prepare_img()
UpperCAmelCase : Optional[int] = image_processor(images=snake_case , return_tensors="pt" ).to(snake_case )
# forward pass
with torch.no_grad():
UpperCAmelCase : Any = model(**snake_case )
UpperCAmelCase : int = outputs.logits
# verify the logits
UpperCAmelCase : Optional[int] = torch.Size((1, 2_1, 3_2, 3_2) )
self.assertEqual(logits.shape , snake_case )
UpperCAmelCase : Optional[Any] = torch.tensor(
[
[[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
[[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
[[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
] , device=snake_case , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , snake_case , atol=1e-4 ) )
@slow
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Any = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
UpperCAmelCase : Optional[int] = model.to(snake_case )
UpperCAmelCase : Any = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
UpperCAmelCase : str = prepare_img()
UpperCAmelCase : Optional[Any] = image_processor(images=snake_case , return_tensors="pt" ).to(snake_case )
# forward pass
with torch.no_grad():
UpperCAmelCase : Dict = model(**snake_case )
UpperCAmelCase : Optional[Any] = outputs.logits.detach().cpu()
UpperCAmelCase : Union[str, Any] = image_processor.post_process_semantic_segmentation(outputs=snake_case , target_sizes=[(5_0, 6_0)] )
UpperCAmelCase : Union[str, Any] = torch.Size((5_0, 6_0) )
self.assertEqual(segmentation[0].shape , snake_case )
UpperCAmelCase : int = image_processor.post_process_semantic_segmentation(outputs=snake_case )
UpperCAmelCase : Optional[Any] = torch.Size((3_2, 3_2) )
self.assertEqual(segmentation[0].shape , snake_case )
| 311
|
'''simple docstring'''
from jiwer import compute_measures
import datasets
a : List[Any] = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
a : str = "\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n"
a : Union[str, Any] = "\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> wer = datasets.load_metric(\"wer\")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase__ ( datasets.Metric ):
"""simple docstring"""
def A_ ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[
"https://en.wikipedia.org/wiki/Word_error_rate",
] , )
def A_ ( self , snake_case=None , snake_case=None , snake_case=False ):
'''simple docstring'''
if concatenate_texts:
return compute_measures(snake_case , snake_case )["wer"]
else:
UpperCAmelCase : Dict = 0
UpperCAmelCase : Optional[Any] = 0
for prediction, reference in zip(snake_case , snake_case ):
UpperCAmelCase : Tuple = compute_measures(snake_case , snake_case )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
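# Worked example of the formula above: reference "the cat sat" vs prediction
# "the cat sat down" gives S=0, D=0, I=1 with N=3 reference words, so
# WER = (0 + 0 + 1) / 3 ≈ 0.33.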
| 311
| 1
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : int = {
"task_specific_params": {
"summarization": {"length_penalty": 1.0, "max_length": 1_2_8, "min_length": 1_2, "num_beams": 4},
"summarization_cnn": {"length_penalty": 2.0, "max_length": 1_4_2, "min_length": 5_6, "num_beams": 4},
"summarization_xsum": {"length_penalty": 1.0, "max_length": 6_2, "min_length": 1_1, "num_beams": 6},
}
}
UpperCAmelCase : Dict = {
"task_specific_params.summarization.length_penalty": 1.0,
"task_specific_params.summarization.max_length": 1_2_8,
"task_specific_params.summarization.min_length": 1_2,
"task_specific_params.summarization.num_beams": 4,
"task_specific_params.summarization_cnn.length_penalty": 2.0,
"task_specific_params.summarization_cnn.max_length": 1_4_2,
"task_specific_params.summarization_cnn.min_length": 5_6,
"task_specific_params.summarization_cnn.num_beams": 4,
"task_specific_params.summarization_xsum.length_penalty": 1.0,
"task_specific_params.summarization_xsum.max_length": 6_2,
"task_specific_params.summarization_xsum.min_length": 1_1,
"task_specific_params.summarization_xsum.num_beams": 6,
}
self.assertEqual(flatten_dict(snake_case ) , snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(snake_case ) , x.transpose() ) )
UpperCAmelCase : Tuple = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(snake_case , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Tuple = np.random.randn(3 , 4 )
UpperCAmelCase : List[str] = torch.tensor(snake_case )
self.assertTrue(np.allclose(transpose(snake_case ) , transpose(snake_case ).numpy() ) )
UpperCAmelCase : Any = np.random.randn(3 , 4 , 5 )
UpperCAmelCase : Optional[Any] = torch.tensor(snake_case )
self.assertTrue(np.allclose(transpose(snake_case , axes=(1, 2, 0) ) , transpose(snake_case , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : List[Any] = np.random.randn(3 , 4 )
UpperCAmelCase : List[str] = tf.constant(snake_case )
self.assertTrue(np.allclose(transpose(snake_case ) , transpose(snake_case ).numpy() ) )
UpperCAmelCase : Optional[Any] = np.random.randn(3 , 4 , 5 )
UpperCAmelCase : str = tf.constant(snake_case )
self.assertTrue(np.allclose(transpose(snake_case , axes=(1, 2, 0) ) , transpose(snake_case , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Dict = np.random.randn(3 , 4 )
UpperCAmelCase : List[str] = jnp.array(snake_case )
self.assertTrue(np.allclose(transpose(snake_case ) , np.asarray(transpose(snake_case ) ) ) )
UpperCAmelCase : Union[str, Any] = np.random.randn(3 , 4 , 5 )
UpperCAmelCase : List[Any] = jnp.array(snake_case )
self.assertTrue(np.allclose(transpose(snake_case , axes=(1, 2, 0) ) , np.asarray(transpose(snake_case , axes=(1, 2, 0) ) ) ) )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : List[str] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(snake_case , (4, 3) ) , np.reshape(snake_case , (4, 3) ) ) )
UpperCAmelCase : int = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(snake_case , (1_2, 5) ) , np.reshape(snake_case , (1_2, 5) ) ) )
@require_torch
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Tuple = np.random.randn(3 , 4 )
UpperCAmelCase : Tuple = torch.tensor(snake_case )
self.assertTrue(np.allclose(reshape(snake_case , (4, 3) ) , reshape(snake_case , (4, 3) ).numpy() ) )
UpperCAmelCase : Optional[Any] = np.random.randn(3 , 4 , 5 )
UpperCAmelCase : List[Any] = torch.tensor(snake_case )
self.assertTrue(np.allclose(reshape(snake_case , (1_2, 5) ) , reshape(snake_case , (1_2, 5) ).numpy() ) )
@require_tf
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = np.random.randn(3 , 4 )
UpperCAmelCase : List[Any] = tf.constant(snake_case )
self.assertTrue(np.allclose(reshape(snake_case , (4, 3) ) , reshape(snake_case , (4, 3) ).numpy() ) )
UpperCAmelCase : Optional[Any] = np.random.randn(3 , 4 , 5 )
UpperCAmelCase : Any = tf.constant(snake_case )
self.assertTrue(np.allclose(reshape(snake_case , (1_2, 5) ) , reshape(snake_case , (1_2, 5) ).numpy() ) )
@require_flax
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Tuple = np.random.randn(3 , 4 )
UpperCAmelCase : List[str] = jnp.array(snake_case )
self.assertTrue(np.allclose(reshape(snake_case , (4, 3) ) , np.asarray(reshape(snake_case , (4, 3) ) ) ) )
UpperCAmelCase : Dict = np.random.randn(3 , 4 , 5 )
UpperCAmelCase : List[Any] = jnp.array(snake_case )
self.assertTrue(np.allclose(reshape(snake_case , (1_2, 5) ) , np.asarray(reshape(snake_case , (1_2, 5) ) ) ) )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(snake_case ) , np.squeeze(snake_case ) ) )
UpperCAmelCase : Optional[int] = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(snake_case , axis=2 ) , np.squeeze(snake_case , axis=2 ) ) )
@require_torch
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Dict = np.random.randn(1 , 3 , 4 )
UpperCAmelCase : Optional[Any] = torch.tensor(snake_case )
self.assertTrue(np.allclose(squeeze(snake_case ) , squeeze(snake_case ).numpy() ) )
UpperCAmelCase : str = np.random.randn(1 , 4 , 1 , 5 )
UpperCAmelCase : List[Any] = torch.tensor(snake_case )
self.assertTrue(np.allclose(squeeze(snake_case , axis=2 ) , squeeze(snake_case , axis=2 ).numpy() ) )
@require_tf
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : int = np.random.randn(1 , 3 , 4 )
UpperCAmelCase : Optional[Any] = tf.constant(snake_case )
self.assertTrue(np.allclose(squeeze(snake_case ) , squeeze(snake_case ).numpy() ) )
UpperCAmelCase : Dict = np.random.randn(1 , 4 , 1 , 5 )
UpperCAmelCase : Optional[int] = tf.constant(snake_case )
self.assertTrue(np.allclose(squeeze(snake_case , axis=2 ) , squeeze(snake_case , axis=2 ).numpy() ) )
@require_flax
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : List[str] = np.random.randn(1 , 3 , 4 )
UpperCAmelCase : Union[str, Any] = jnp.array(snake_case )
self.assertTrue(np.allclose(squeeze(snake_case ) , np.asarray(squeeze(snake_case ) ) ) )
UpperCAmelCase : Optional[Any] = np.random.randn(1 , 4 , 1 , 5 )
UpperCAmelCase : Union[str, Any] = jnp.array(snake_case )
self.assertTrue(np.allclose(squeeze(snake_case , axis=2 ) , np.asarray(squeeze(snake_case , axis=2 ) ) ) )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(snake_case , axis=1 ) , np.expand_dims(snake_case , axis=1 ) ) )
@require_torch
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = np.random.randn(3 , 4 )
UpperCAmelCase : Dict = torch.tensor(snake_case )
self.assertTrue(np.allclose(expand_dims(snake_case , axis=1 ) , expand_dims(snake_case , axis=1 ).numpy() ) )
@require_tf
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = np.random.randn(3 , 4 )
UpperCAmelCase : int = tf.constant(snake_case )
self.assertTrue(np.allclose(expand_dims(snake_case , axis=1 ) , expand_dims(snake_case , axis=1 ).numpy() ) )
@require_flax
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : List[Any] = np.random.randn(3 , 4 )
UpperCAmelCase : Union[str, Any] = jnp.array(snake_case )
self.assertTrue(np.allclose(expand_dims(snake_case , axis=1 ) , np.asarray(expand_dims(snake_case , axis=1 ) ) ) )
| 311
|
'''simple docstring'''
from functools import lru_cache
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = 2
UpperCAmelCase : str = set()
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.add(__magic_name__ )
if n > 1:
factors.add(__magic_name__ )
return factors
@lru_cache
def lowercase ( __magic_name__ ):
'''simple docstring'''
return len(unique_prime_factors(__magic_name__ ) )
def lowercase ( __magic_name__ ):
'''simple docstring'''
return len(set(__magic_name__ ) ) in (0, 1)
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Dict = 2
while True:
# Increment each value of a generated range
UpperCAmelCase : Any = [base + i for i in range(__magic_name__ )]
        # Run elements through our unique_prime_factors function
# Append our target number to the end.
UpperCAmelCase : Dict = [upf_len(__magic_name__ ) for x in group]
checker.append(__magic_name__ )
# If all numbers in the list are equal, return the group variable.
if equality(__magic_name__ ):
return group
# Increment our base variable by 1
base += 1
def lowercase ( __magic_name__ = 4 ):
'''simple docstring'''
UpperCAmelCase : int = run(__magic_name__ )
return results[0] if len(__magic_name__ ) else None
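# Worked example (a sketch in terms of the helpers' upstream names): for n = 2
# the first pair of consecutive integers with two distinct prime factors each
# is (14, 15), since 14 = 2 * 7 and 15 = 3 * 5, so run(2) returns [14, 15]
# and solution(2) returns 14.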
if __name__ == "__main__":
print(solution())
| 311
| 1
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
a : Union[str, Any] = logging.get_logger(__name__)
def lowercase ( __magic_name__ ):
'''simple docstring'''
if isinstance(__magic_name__ , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(__magic_name__ , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(__magic_name__ ):
return [[videos]]
raise ValueError(F"Could not make batched video from {videos}" )
class UpperCamelCase__ ( lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = ["pixel_values"]
def __init__( self , snake_case = True , snake_case = None , snake_case = PILImageResampling.BILINEAR , snake_case = True , snake_case = None , snake_case = True , snake_case = 1 / 2_5_5 , snake_case = True , snake_case = True , snake_case = None , snake_case = None , **snake_case , ):
'''simple docstring'''
super().__init__(**snake_case )
UpperCAmelCase : Any = size if size is not None else {"shortest_edge": 2_5_6}
UpperCAmelCase : Optional[int] = get_size_dict(snake_case , default_to_square=snake_case )
UpperCAmelCase : List[Any] = crop_size if crop_size is not None else {"height": 2_2_4, "width": 2_2_4}
UpperCAmelCase : Any = get_size_dict(snake_case , param_name="crop_size" )
UpperCAmelCase : Any = do_resize
UpperCAmelCase : Dict = size
UpperCAmelCase : Any = do_center_crop
UpperCAmelCase : int = crop_size
UpperCAmelCase : Union[str, Any] = resample
UpperCAmelCase : Optional[int] = do_rescale
UpperCAmelCase : Dict = rescale_factor
UpperCAmelCase : List[str] = offset
UpperCAmelCase : int = do_normalize
UpperCAmelCase : Optional[int] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def A_ ( self , snake_case , snake_case , snake_case = PILImageResampling.BILINEAR , snake_case = None , **snake_case , ):
'''simple docstring'''
UpperCAmelCase : int = get_size_dict(snake_case , default_to_square=snake_case )
if "shortest_edge" in size:
UpperCAmelCase : str = get_resize_output_image_size(snake_case , size["shortest_edge"] , default_to_square=snake_case )
elif "height" in size and "width" in size:
UpperCAmelCase : str = (size["height"], size["width"])
else:
raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" )
return resize(snake_case , size=snake_case , resample=snake_case , data_format=snake_case , **snake_case )
def A_ ( self , snake_case , snake_case , snake_case = None , **snake_case , ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = get_size_dict(snake_case )
if "height" not in size or "width" not in size:
raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}" )
return center_crop(snake_case , size=(size["height"], size["width"]) , data_format=snake_case , **snake_case )
def A_ ( self , snake_case , snake_case , snake_case = True , snake_case = None , **snake_case , ):
'''simple docstring'''
UpperCAmelCase : int = image.astype(np.floataa )
if offset:
UpperCAmelCase : Optional[int] = image - (scale / 2)
return rescale(snake_case , scale=snake_case , data_format=snake_case , **snake_case )
def A_ ( self , snake_case , snake_case , snake_case , snake_case = None , **snake_case , ):
'''simple docstring'''
return normalize(snake_case , mean=snake_case , std=snake_case , data_format=snake_case , **snake_case )
def A_ ( self , snake_case , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = ChannelDimension.FIRST , ):
'''simple docstring'''
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
if offset and not do_rescale:
raise ValueError("For offset, do_rescale must also be set to True." )
# All transformations expect numpy arrays.
UpperCAmelCase : List[str] = to_numpy_array(snake_case )
if do_resize:
UpperCAmelCase : List[str] = self.resize(image=snake_case , size=snake_case , resample=snake_case )
if do_center_crop:
UpperCAmelCase : str = self.center_crop(snake_case , size=snake_case )
if do_rescale:
UpperCAmelCase : Union[str, Any] = self.rescale(image=snake_case , scale=snake_case , offset=snake_case )
if do_normalize:
UpperCAmelCase : Optional[Any] = self.normalize(image=snake_case , mean=snake_case , std=snake_case )
UpperCAmelCase : Optional[int] = to_channel_dimension_format(snake_case , snake_case )
return image
def A_ ( self , snake_case , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = ChannelDimension.FIRST , **snake_case , ):
'''simple docstring'''
UpperCAmelCase : int = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase : str = resample if resample is not None else self.resample
UpperCAmelCase : Optional[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase : Tuple = offset if offset is not None else self.offset
UpperCAmelCase : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase : str = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase : Tuple = image_std if image_std is not None else self.image_std
UpperCAmelCase : str = size if size is not None else self.size
UpperCAmelCase : List[str] = get_size_dict(snake_case , default_to_square=snake_case )
UpperCAmelCase : List[Any] = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase : Tuple = get_size_dict(snake_case , param_name="crop_size" )
if not valid_images(snake_case ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
UpperCAmelCase : Optional[Any] = make_batched(snake_case )
UpperCAmelCase : Optional[int] = [
[
self._preprocess_image(
image=snake_case , do_resize=snake_case , size=snake_case , resample=snake_case , do_center_crop=snake_case , crop_size=snake_case , do_rescale=snake_case , rescale_factor=snake_case , offset=snake_case , do_normalize=snake_case , image_mean=snake_case , image_std=snake_case , data_format=snake_case , )
for img in video
]
for video in videos
]
UpperCAmelCase : Optional[Any] = {"pixel_values": videos}
return BatchFeature(data=snake_case , tensor_type=snake_case )
| 311
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
a : Union[str, Any] = {
"configuration_encodec": [
"ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EncodecConfig",
],
"feature_extraction_encodec": ["EncodecFeatureExtractor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Optional[int] = [
"ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
"EncodecModel",
"EncodecPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
a : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 311
| 1
|
'''simple docstring'''
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
a : List[Any] = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
"--original_config_file",
default=None,
type=str,
help="The YAML config file corresponding to the original architecture.",
)
parser.add_argument(
"--num_in_channels",
default=None,
type=int,
help="The number of input channels. If `None` number of input channels will be automatically inferred.",
)
parser.add_argument(
"--scheduler_type",
default="pndm",
type=str,
help="Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']",
)
parser.add_argument(
"--pipeline_type",
default=None,
type=str,
help=(
"The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"
". If `None` pipeline will be automatically inferred."
),
)
parser.add_argument(
"--image_size",
default=None,
type=int,
help=(
"The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"
" Base. Use 768 for Stable Diffusion v2."
),
)
parser.add_argument(
"--prediction_type",
default=None,
type=str,
help=(
"The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"
" Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."
),
)
parser.add_argument(
"--extract_ema",
action="store_true",
help=(
"Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
),
)
parser.add_argument(
"--upcast_attention",
action="store_true",
help=(
"Whether the attention computation should always be upcasted. This is necessary when running stable"
" diffusion 2.1."
),
)
parser.add_argument(
"--from_safetensors",
action="store_true",
help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
)
parser.add_argument(
"--to_safetensors",
action="store_true",
help="Whether to store pipeline in safetensors format or not.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
parser.add_argument(
"--stable_unclip",
type=str,
default=None,
required=False,
help="Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.",
)
parser.add_argument(
"--stable_unclip_prior",
type=str,
default=None,
required=False,
help="Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.",
)
parser.add_argument(
"--clip_stats_path",
type=str,
help="Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.",
required=False,
)
parser.add_argument(
"--controlnet", action="store_true", default=None, help="Set flag if this is a controlnet checkpoint."
)
parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
parser.add_argument(
"--vae_path",
type=str,
default=None,
required=False,
help="Set to a path, hub id to an already converted vae to not convert it again.",
)
a : List[str] = parser.parse_args()
a : Any = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
pipe.to(torch_dtype=torch.floataa)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 311
|
'''simple docstring'''
# Lint as: python3
import itertools
import os
import re
a : Tuple = re.compile(R"([A-Z]+)([A-Z][a-z])")
a : Union[str, Any] = re.compile(R"([a-z\d])([A-Z])")
a : str = re.compile(R"(?<!_)_(?!_)")
a : List[Any] = re.compile(R"(_{2,})")
a : List[Any] = R"^\w+(\.\w+)*$"
a : Dict = R"<>:/\|?*"
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Dict = _uppercase_uppercase_re.sub(R"\1_\2" , __magic_name__ )
UpperCAmelCase : List[str] = _lowercase_uppercase_re.sub(R"\1_\2" , __magic_name__ )
return name.lower()
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Any = _single_underscore_re.split(__magic_name__ )
UpperCAmelCase : Union[str, Any] = [_multiple_underscores_re.split(__magic_name__ ) for n in name]
return "".join(n.capitalize() for n in itertools.chain.from_iterable(__magic_name__ ) if n != "" )
def lowercase ( __magic_name__ ):
'''simple docstring'''
if os.path.basename(__magic_name__ ) != name:
raise ValueError(F"Should be a dataset name, not a path: {name}" )
return camelcase_to_snakecase(__magic_name__ )
def lowercase ( __magic_name__ , __magic_name__ ):
'''simple docstring'''
if os.path.basename(__magic_name__ ) != name:
raise ValueError(F"Should be a dataset name, not a path: {name}" )
if not re.match(_split_re , __magic_name__ ):
raise ValueError(F"Split name should match '{_split_re}'' but got '{split}'." )
return F"{filename_prefix_for_name(__magic_name__ )}-{split}"
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None ):
'''simple docstring'''
UpperCAmelCase : List[str] = filename_prefix_for_split(__magic_name__ , __magic_name__ )
if filetype_suffix:
prefix += F".{filetype_suffix}"
UpperCAmelCase : int = os.path.join(__magic_name__ , __magic_name__ )
return F"{filepath}*"
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None , __magic_name__=None ):
'''simple docstring'''
UpperCAmelCase : List[str] = filename_prefix_for_split(__magic_name__ , __magic_name__ )
UpperCAmelCase : int = os.path.join(__magic_name__ , __magic_name__ )
if shard_lengths:
UpperCAmelCase : Tuple = len(__magic_name__ )
UpperCAmelCase : Optional[int] = [F"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(__magic_name__ )]
if filetype_suffix:
UpperCAmelCase : Optional[int] = [filename + F".{filetype_suffix}" for filename in filenames]
return filenames
else:
UpperCAmelCase : int = prefix
if filetype_suffix:
filename += F".{filetype_suffix}"
return [filename]
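# Example output (a sketch, assuming the upstream name filenames_for_dataset_split):
# >>> filenames_for_dataset_split("/data", "squad", "train", "arrow", shard_lengths=[100, 100])
# ['/data/squad-train-00000-of-00002.arrow', '/data/squad-train-00001-of-00002.arrow']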
| 311
| 1
|
'''simple docstring'''
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
a : Dict = logging.get_logger(__name__)
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : str = MobileNetVaConfig(layer_norm_eps=0.0_0_1 )
if "_quant" in model_name:
raise ValueError("Quantized models are not supported." )
UpperCAmelCase : Tuple = re.match(R"^mobilenet_v1_([^_]*)_([^_]*)$" , __magic_name__ )
if matches:
UpperCAmelCase : int = float(matches[1] )
UpperCAmelCase : Union[str, Any] = int(matches[2] )
# The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
# the usual 1000. The first class (index 0) is "background".
UpperCAmelCase : str = 1001
UpperCAmelCase : int = "imagenet-1k-id2label.json"
UpperCAmelCase : Tuple = "huggingface/label-files"
UpperCAmelCase : List[str] = json.load(open(hf_hub_download(__magic_name__ , __magic_name__ , repo_type="dataset" ) , "r" ) )
UpperCAmelCase : Optional[int] = {int(__magic_name__ ) + 1: v for k, v in idalabel.items()}
UpperCAmelCase : Optional[int] = "background"
UpperCAmelCase : int = idalabel
UpperCAmelCase : Optional[int] = {v: k for k, v in idalabel.items()}
return config
def lowercase ( ):
'''simple docstring'''
UpperCAmelCase : str = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCAmelCase : Union[str, Any] = Image.open(requests.get(__magic_name__ , stream=__magic_name__ ).raw )
return im
@torch.no_grad()
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=False ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = get_mobilenet_va_config(__magic_name__ )
# Load 🤗 model
UpperCAmelCase : Tuple = MobileNetVaForImageClassification(__magic_name__ ).eval()
# Load weights from TensorFlow checkpoint
load_tf_weights_in_mobilenet_va(__magic_name__ , __magic_name__ , __magic_name__ )
# Check outputs on an image, prepared by MobileNetV1ImageProcessor
UpperCAmelCase : Optional[int] = MobileNetVaImageProcessor(
crop_size={"width": config.image_size, "height": config.image_size} , size={"shortest_edge": config.image_size + 32} , )
UpperCAmelCase : Optional[Any] = image_processor(images=prepare_img() , return_tensors="pt" )
UpperCAmelCase : Union[str, Any] = model(**__magic_name__ )
UpperCAmelCase : Tuple = outputs.logits
assert logits.shape == (1, 1001)
if model_name == "mobilenet_v1_1.0_224":
UpperCAmelCase : int = torch.tensor([-4.1_7_3_9, -1.1_2_3_3, 3.1_2_0_5] )
elif model_name == "mobilenet_v1_0.75_192":
UpperCAmelCase : Tuple = torch.tensor([-3.9_4_4_0, -2.3_1_4_1, -0.3_3_3_3] )
else:
UpperCAmelCase : Optional[int] = None
if expected_logits is not None:
assert torch.allclose(logits[0, :3] , __magic_name__ , atol=1e-4 )
Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ )
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(__magic_name__ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(__magic_name__ )
if push_to_hub:
print("Pushing to the hub..." )
UpperCAmelCase : Tuple = "google/" + model_name
image_processor.push_to_hub(__magic_name__ )
model.push_to_hub(__magic_name__ )
if __name__ == "__main__":
a : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="mobilenet_v1_1.0_224",
type=str,
help="Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.",
)
parser.add_argument(
"--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
a : Optional[Any] = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 311
|
'''simple docstring'''
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
a : Optional[int] = _symbol_database.Default()
a : Any = _descriptor_pool.Default().AddSerializedFile(
B"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"
)
a : Tuple = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
a : str = None
a : Optional[Any] = B"H\003"
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
a : str = 45
a : Any = 15_81
a : List[Any] = 15_17
a : Union[str, Any] = 15_70
a : Optional[Any] = 15_84
a : List[str] = 17_93
a : Optional[Any] = 17_95
a : Tuple = 19_16
a : Optional[Any] = 18_64
a : int = 19_05
a : Optional[Any] = 19_19
a : Union[str, Any] = 24_29
a : List[Any] = 22_08
a : Dict = 24_18
a : Optional[int] = 23_23
a : str = 24_07
# @@protoc_insertion_point(module_scope)
| 311
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
a : List[str] = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Dict = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Any = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[str] = [
"FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FNetForMaskedLM",
"FNetForMultipleChoice",
"FNetForNextSentencePrediction",
"FNetForPreTraining",
"FNetForQuestionAnswering",
"FNetForSequenceClassification",
"FNetForTokenClassification",
"FNetLayer",
"FNetModel",
"FNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
a : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 311
|
'''simple docstring'''
import argparse
import copy
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : List[str] = {}
with open(__magic_name__ ) as f:
for line in f:
if line.split()[0] not in dict_of_neighbours:
UpperCAmelCase : List[Any] = []
_list.append([line.split()[1], line.split()[2]] )
UpperCAmelCase : Tuple = _list
else:
dict_of_neighbours[line.split()[0]].append(
[line.split()[1], line.split()[2]] )
if line.split()[1] not in dict_of_neighbours:
UpperCAmelCase : Any = []
_list.append([line.split()[0], line.split()[2]] )
UpperCAmelCase : int = _list
else:
dict_of_neighbours[line.split()[1]].append(
[line.split()[0], line.split()[2]] )
return dict_of_neighbours
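# Assumed input format (inferred from the parsing above, not documented in the
# original): each line holds "node_a node_b distance", e.g.
#   a b 20
#   a c 18
# which yields dict_of_neighbours == {"a": [["b", "20"], ["c", "18"]],
#                                     "b": [["a", "20"]], "c": [["a", "18"]]}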
def lowercase ( __magic_name__ , __magic_name__ ):
'''simple docstring'''
with open(__magic_name__ ) as f:
UpperCAmelCase : List[str] = f.read(1 )
UpperCAmelCase : List[Any] = start_node
UpperCAmelCase : Union[str, Any] = []
UpperCAmelCase : Any = start_node
UpperCAmelCase : Optional[Any] = 0
while visiting not in first_solution:
UpperCAmelCase : Optional[Any] = 1_0000
for k in dict_of_neighbours[visiting]:
if int(k[1] ) < int(__magic_name__ ) and k[0] not in first_solution:
UpperCAmelCase : Tuple = k[1]
UpperCAmelCase : Dict = k[0]
first_solution.append(__magic_name__ )
UpperCAmelCase : int = distance_of_first_solution + int(__magic_name__ )
UpperCAmelCase : str = best_node
first_solution.append(__magic_name__ )
UpperCAmelCase : int = 0
for k in dict_of_neighbours[first_solution[-2]]:
if k[0] == start_node:
break
position += 1
UpperCAmelCase : str = (
distance_of_first_solution
+ int(dict_of_neighbours[first_solution[-2]][position][1] )
- 1_0000
)
return first_solution, distance_of_first_solution
def lowercase ( __magic_name__ , __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = []
for n in solution[1:-1]:
UpperCAmelCase : Any = solution.index(__magic_name__ )
for kn in solution[1:-1]:
UpperCAmelCase : Dict = solution.index(__magic_name__ )
if n == kn:
continue
UpperCAmelCase : Tuple = copy.deepcopy(__magic_name__ )
UpperCAmelCase : Optional[int] = kn
UpperCAmelCase : List[str] = n
UpperCAmelCase : str = 0
for k in _tmp[:-1]:
UpperCAmelCase : List[Any] = _tmp[_tmp.index(__magic_name__ ) + 1]
for i in dict_of_neighbours[k]:
if i[0] == next_node:
UpperCAmelCase : List[Any] = distance + int(i[1] )
_tmp.append(__magic_name__ )
if _tmp not in neighborhood_of_solution:
neighborhood_of_solution.append(_tmp )
UpperCAmelCase : List[str] = len(neighborhood_of_solution[0] ) - 1
neighborhood_of_solution.sort(key=lambda __magic_name__ : x[index_of_last_item_in_the_list] )
return neighborhood_of_solution
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : List[Any] = 1
UpperCAmelCase : List[str] = first_solution
UpperCAmelCase : str = []
UpperCAmelCase : Union[str, Any] = distance_of_first_solution
UpperCAmelCase : Union[str, Any] = solution
while count <= iters:
UpperCAmelCase : int = find_neighborhood(__magic_name__ , __magic_name__ )
UpperCAmelCase : Any = 0
UpperCAmelCase : List[str] = neighborhood[index_of_best_solution]
UpperCAmelCase : Dict = len(__magic_name__ ) - 1
UpperCAmelCase : Dict = False
while not found:
UpperCAmelCase : List[Any] = 0
while i < len(__magic_name__ ):
if best_solution[i] != solution[i]:
UpperCAmelCase : int = best_solution[i]
UpperCAmelCase : Optional[int] = solution[i]
break
UpperCAmelCase : List[str] = i + 1
if [first_exchange_node, second_exchange_node] not in tabu_list and [
second_exchange_node,
first_exchange_node,
] not in tabu_list:
tabu_list.append([first_exchange_node, second_exchange_node] )
UpperCAmelCase : List[str] = True
UpperCAmelCase : List[Any] = best_solution[:-1]
UpperCAmelCase : str = neighborhood[index_of_best_solution][best_cost_index]
if cost < best_cost:
UpperCAmelCase : Union[str, Any] = cost
UpperCAmelCase : Tuple = solution
else:
UpperCAmelCase : Optional[Any] = index_of_best_solution + 1
UpperCAmelCase : str = neighborhood[index_of_best_solution]
if len(__magic_name__ ) >= size:
tabu_list.pop(0 )
UpperCAmelCase : int = count + 1
return best_solution_ever, best_cost
def lowercase ( __magic_name__=None ):
'''simple docstring'''
UpperCAmelCase : Dict = generate_neighbours(args.File )
UpperCAmelCase , UpperCAmelCase : Any = generate_first_solution(
args.File , __magic_name__ )
UpperCAmelCase , UpperCAmelCase : Any = tabu_search(
__magic_name__ , __magic_name__ , __magic_name__ , args.Iterations , args.Size , )
print(F"Best solution: {best_sol}, with total distance: {best_cost}." )
if __name__ == "__main__":
a : Union[str, Any] = argparse.ArgumentParser(description="Tabu Search")
parser.add_argument(
"-f",
"--File",
type=str,
help="Path to the file containing the data",
required=True,
)
parser.add_argument(
"-i",
"--Iterations",
type=int,
help="How many iterations the algorithm should perform",
required=True,
)
parser.add_argument(
"-s", "--Size", type=int, help="Size of the tabu list", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
| 311
| 1
|
'''simple docstring'''
from __future__ import annotations
from PIL import Image
# Define glider example
a : List[str] = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
a : Tuple = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Tuple = []
for i in range(len(__magic_name__ ) ):
UpperCAmelCase : Union[str, Any] = []
for j in range(len(cells[i] ) ):
# Get the number of live neighbours
UpperCAmelCase : Union[str, Any] = 0
if i > 0 and j > 0:
neighbour_count += cells[i - 1][j - 1]
if i > 0:
neighbour_count += cells[i - 1][j]
if i > 0 and j < len(cells[i] ) - 1:
neighbour_count += cells[i - 1][j + 1]
if j > 0:
neighbour_count += cells[i][j - 1]
if j < len(cells[i] ) - 1:
neighbour_count += cells[i][j + 1]
if i < len(__magic_name__ ) - 1 and j > 0:
neighbour_count += cells[i + 1][j - 1]
if i < len(__magic_name__ ) - 1:
neighbour_count += cells[i + 1][j]
if i < len(__magic_name__ ) - 1 and j < len(cells[i] ) - 1:
neighbour_count += cells[i + 1][j + 1]
# Rules of the game of life (excerpt from Wikipedia):
# 1. Any live cell with two or three live neighbours survives.
# 2. Any dead cell with three live neighbours becomes a live cell.
# 3. All other live cells die in the next generation.
# Similarly, all other dead cells stay dead.
UpperCAmelCase : Optional[Any] = cells[i][j] == 1
            if (
                (alive and 2 <= neighbour_count <= 3)
                or (not alive and neighbour_count == 3)
            ):
next_generation_row.append(1 )
else:
next_generation_row.append(0 )
next_generation.append(__magic_name__ )
return next_generation
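# Minimal sanity check (a sketch, assuming the upstream names new_generation
# and BLINKER): one step flips the vertical blinker into its horizontal phase.
# >>> new_generation(BLINKER)
# [[0, 0, 0], [1, 1, 1], [0, 0, 0]]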
def lowercase ( __magic_name__ , __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = []
for _ in range(__magic_name__ ):
# Create output image
UpperCAmelCase : List[Any] = Image.new("RGB" , (len(cells[0] ), len(__magic_name__ )) )
UpperCAmelCase : Dict = img.load()
# Save cells to image
for x in range(len(__magic_name__ ) ):
for y in range(len(cells[0] ) ):
UpperCAmelCase : Dict = 255 - cells[y][x] * 255
UpperCAmelCase : Tuple = (colour, colour, colour)
# Save image
images.append(__magic_name__ )
UpperCAmelCase : str = new_generation(__magic_name__ )
return images
if __name__ == "__main__":
a : Any = generate_images(GLIDER, 16)
images[0].save("out.gif", save_all=True, append_images=images[1:])
| 311
|
'''simple docstring'''
from collections.abc import Generator
from math import sin
def lowercase ( __magic_name__ ):
'''simple docstring'''
if len(__magic_name__ ) != 32:
raise ValueError("Input must be of length 32" )
UpperCAmelCase : Union[str, Any] = b""
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
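# Example (the 32-char bit string is reordered one byte, i.e. 8 chars, at a
# time; assumes the upstream name to_little_endian):
# >>> to_little_endian(b"aaaaaaaabbbbbbbbccccccccdddddddd")
# b'ddddddddccccccccbbbbbbbbaaaaaaaa'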
def lowercase ( __magic_name__ ):
'''simple docstring'''
if i < 0:
raise ValueError("Input must be non-negative" )
UpperCAmelCase : Dict = format(__magic_name__ , "08x" )[-8:]
UpperCAmelCase : List[str] = b""
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8" )
return little_endian_hex
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : int = b""
for char in message:
bit_string += format(__magic_name__ , "08b" ).encode("utf-8" )
UpperCAmelCase : List[Any] = format(len(__magic_name__ ) , "064b" ).encode("utf-8" )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(__magic_name__ ) % 512 != 448:
bit_string += b"0"
bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
return bit_string
def lowercase ( __magic_name__ ):
'''simple docstring'''
if len(__magic_name__ ) % 512 != 0:
raise ValueError("Input must have length that's a multiple of 512" )
for pos in range(0 , len(__magic_name__ ) , 512 ):
UpperCAmelCase : Union[str, Any] = bit_string[pos : pos + 512]
UpperCAmelCase : Tuple = []
for i in range(0 , 512 , 32 ):
block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
yield block_words
def lowercase ( __magic_name__ ):
'''simple docstring'''
if i < 0:
raise ValueError("Input must be non-negative" )
UpperCAmelCase : Any = format(__magic_name__ , "032b" )
UpperCAmelCase : int = ""
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(__magic_name__ , 2 )
def lowercase ( __magic_name__ , __magic_name__ ):
'''simple docstring'''
return (a + b) % 2**32
def lowercase ( __magic_name__ , __magic_name__ ):
'''simple docstring'''
if i < 0:
raise ValueError("Input must be non-negative" )
if shift < 0:
raise ValueError("Shift must be non-negative" )
return ((i << shift) ^ (i >> (32 - shift))) % 2**32
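# e.g. a 32-bit left-rotate of 0x80000000 by 1 wraps the top bit around:
# ((0x80000000 << 1) ^ (0x80000000 >> 31)) % 2**32 == 0x00000001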
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Dict = preprocess(__magic_name__ )
UpperCAmelCase : List[Any] = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
# Starting states
UpperCAmelCase : List[str] = 0X67452301
UpperCAmelCase : Tuple = 0XEFCDAB89
UpperCAmelCase : List[Any] = 0X98BADCFE
UpperCAmelCase : List[str] = 0X10325476
    UpperCAmelCase : Dict = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,  # round 1
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,  # round 2
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,  # round 3
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,  # round 4
    ]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(__magic_name__ ):
UpperCAmelCase : Optional[Any] = aa
UpperCAmelCase : List[Any] = ba
UpperCAmelCase : Optional[Any] = ca
UpperCAmelCase : Any = da
# Hash current chunk
for i in range(64 ):
if i <= 15:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
UpperCAmelCase : Tuple = d ^ (b & (c ^ d))
UpperCAmelCase : List[str] = i
elif i <= 31:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
UpperCAmelCase : int = c ^ (d & (b ^ c))
UpperCAmelCase : Tuple = (5 * i + 1) % 16
elif i <= 47:
UpperCAmelCase : Any = b ^ c ^ d
UpperCAmelCase : Union[str, Any] = (3 * i + 5) % 16
else:
UpperCAmelCase : Dict = c ^ (b | not_aa(__magic_name__ ))
UpperCAmelCase : Dict = (7 * i) % 16
UpperCAmelCase : List[str] = (f + a + added_consts[i] + block_words[g]) % 2**32
UpperCAmelCase : List[Any] = d
UpperCAmelCase : Any = c
UpperCAmelCase : Dict = b
UpperCAmelCase : Union[str, Any] = sum_aa(__magic_name__ , left_rotate_aa(__magic_name__ , shift_amounts[i] ) )
# Add hashed chunk to running total
UpperCAmelCase : List[str] = sum_aa(__magic_name__ , __magic_name__ )
UpperCAmelCase : Any = sum_aa(__magic_name__ , __magic_name__ )
UpperCAmelCase : List[Any] = sum_aa(__magic_name__ , __magic_name__ )
UpperCAmelCase : Optional[int] = sum_aa(__magic_name__ , __magic_name__ )
UpperCAmelCase : List[str] = reformat_hex(__magic_name__ ) + reformat_hex(__magic_name__ ) + reformat_hex(__magic_name__ ) + reformat_hex(__magic_name__ )
return digest
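# Sanity check against the well-known MD5 test vectors (a sketch, assuming the
# entry point above keeps its upstream name md5_me):
# >>> md5_me(b"")
# b'd41d8cd98f00b204e9800998ecf8427e'
# >>> md5_me(b"abc")
# b'900150983cd24fb0d6963f7d28e17f72'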
if __name__ == "__main__":
import doctest
doctest.testmod()
| 311
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : List[str] = logging.get_logger(__name__)
a : Tuple = {
"studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
"studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}
class UpperCamelCase__ ( lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = "luke"
def __init__( self , snake_case=5_0_2_6_7 , snake_case=5_0_0_0_0_0 , snake_case=7_6_8 , snake_case=2_5_6 , snake_case=1_2 , snake_case=1_2 , snake_case=3_0_7_2 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=5_1_2 , snake_case=2 , snake_case=0.02 , snake_case=1e-12 , snake_case=True , snake_case=None , snake_case=1 , snake_case=0 , snake_case=2 , **snake_case , ):
'''simple docstring'''
super().__init__(pad_token_id=snake_case , bos_token_id=snake_case , eos_token_id=snake_case , **snake_case )
UpperCAmelCase : int = vocab_size
UpperCAmelCase : Any = entity_vocab_size
UpperCAmelCase : Any = hidden_size
UpperCAmelCase : List[Any] = entity_emb_size
UpperCAmelCase : Any = num_hidden_layers
UpperCAmelCase : Union[str, Any] = num_attention_heads
UpperCAmelCase : List[str] = hidden_act
UpperCAmelCase : Optional[Any] = intermediate_size
UpperCAmelCase : List[Any] = hidden_dropout_prob
UpperCAmelCase : Dict = attention_probs_dropout_prob
UpperCAmelCase : List[Any] = max_position_embeddings
UpperCAmelCase : str = type_vocab_size
UpperCAmelCase : str = initializer_range
UpperCAmelCase : Optional[Any] = layer_norm_eps
UpperCAmelCase : Any = use_entity_aware_attention
UpperCAmelCase : Optional[int] = classifier_dropout
| 311
|
'''simple docstring'''
a : List[str] = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 311
| 1
|
'''simple docstring'''
def lowercase ( __magic_name__ , __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : str = len(__magic_name__ ) + 1
UpperCAmelCase : List[Any] = len(__magic_name__ ) + 1
# dp is a 2d matrix where dp[i][j] denotes whether prefix string of
# length i of input_string matches with prefix string of length j of
# given pattern.
# "dp" stands for dynamic programming.
UpperCAmelCase : str = [[0 for i in range(__magic_name__ )] for j in range(__magic_name__ )]
# since string of zero length match pattern of zero length
UpperCAmelCase : List[str] = 1
# since pattern of zero length will never match with string of non-zero length
for i in range(1 , __magic_name__ ):
UpperCAmelCase : List[str] = 0
# since string of zero length will match with pattern where there
# is at least one * alternatively
for j in range(1 , __magic_name__ ):
UpperCAmelCase : Union[str, Any] = dp[0][j - 2] if pattern[j - 1] == "*" else 0
# now using bottom-up approach to find for all remaining lengths
for i in range(1 , __magic_name__ ):
for j in range(1 , __magic_name__ ):
if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
UpperCAmelCase : List[str] = dp[i - 1][j - 1]
elif pattern[j - 1] == "*":
if dp[i][j - 2] == 1:
UpperCAmelCase : List[Any] = 1
elif pattern[j - 2] in (input_string[i - 1], "."):
UpperCAmelCase : List[Any] = dp[i - 1][j]
else:
UpperCAmelCase : str = 0
else:
UpperCAmelCase : Optional[Any] = 0
return bool(dp[-1][-1] )
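# Worked example (sketch): for input_string = "aab" and pattern = "c*a*b",
# "c*" matches the empty string, "a*" matches "aa" and "b" matches "b", so
# dp[-1][-1] ends up 1 and the function returns True.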
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
a : str = "aab"
a : Dict = "c*a*b"
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(F'{input_string} matches the given pattern {pattern}')
else:
print(F'{input_string} does not match with the given pattern {pattern}')
| 311
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
a : Dict = logging.get_logger(__name__)
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : List[str] = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
UpperCAmelCase : Tuple = 192
UpperCAmelCase : str = 768
UpperCAmelCase : List[Any] = 12
UpperCAmelCase : List[Any] = 3
UpperCAmelCase : List[Any] = [800, 1333]
UpperCAmelCase : List[str] = False
elif yolos_name == "yolos_s_dWr":
UpperCAmelCase : Union[str, Any] = 330
UpperCAmelCase : Union[str, Any] = 14
UpperCAmelCase : Any = 6
UpperCAmelCase : int = 1320
elif "yolos_s" in yolos_name:
UpperCAmelCase : Union[str, Any] = 384
UpperCAmelCase : Dict = 1536
UpperCAmelCase : str = 12
UpperCAmelCase : List[str] = 6
elif "yolos_b" in yolos_name:
UpperCAmelCase : int = [800, 1344]
UpperCAmelCase : Optional[int] = 91
UpperCAmelCase : int = "huggingface/label-files"
UpperCAmelCase : Union[str, Any] = "coco-detection-id2label.json"
UpperCAmelCase : Optional[Any] = json.load(open(hf_hub_download(__magic_name__ , __magic_name__ , repo_type="dataset" ) , "r" ) )
UpperCAmelCase : str = {int(__magic_name__ ): v for k, v in idalabel.items()}
UpperCAmelCase : str = idalabel
UpperCAmelCase : Union[str, Any] = {v: k for k, v in idalabel.items()}
return config
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ = False ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCAmelCase : Tuple = state_dict.pop(F"blocks.{i}.attn.qkv.weight" )
UpperCAmelCase : List[Any] = state_dict.pop(F"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase : str = in_proj_weight[: config.hidden_size, :]
UpperCAmelCase : Optional[int] = in_proj_bias[: config.hidden_size]
UpperCAmelCase : Optional[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCAmelCase : int = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCAmelCase : str = in_proj_weight[-config.hidden_size :, :]
UpperCAmelCase : Tuple = in_proj_bias[-config.hidden_size :]
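# The fused qkv projection of shape (3 * hidden_size, hidden_size) is sliced
# row-wise above: rows [0, H) become the query weights, rows [H, 2H) the keys
# and rows [2H, 3H) the values (H = config.hidden_size); the bias is split the
# same way.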
def lowercase ( __magic_name__ ):
'''simple docstring'''
if "backbone" in name:
UpperCAmelCase : int = name.replace("backbone" , "vit" )
if "cls_token" in name:
UpperCAmelCase : Dict = name.replace("cls_token" , "embeddings.cls_token" )
if "det_token" in name:
UpperCAmelCase : int = name.replace("det_token" , "embeddings.detection_tokens" )
if "mid_pos_embed" in name:
UpperCAmelCase : Tuple = name.replace("mid_pos_embed" , "encoder.mid_position_embeddings" )
if "pos_embed" in name:
UpperCAmelCase : int = name.replace("pos_embed" , "embeddings.position_embeddings" )
if "patch_embed.proj" in name:
UpperCAmelCase : str = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "blocks" in name:
UpperCAmelCase : Tuple = name.replace("blocks" , "encoder.layer" )
if "attn.proj" in name:
UpperCAmelCase : Tuple = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
UpperCAmelCase : Any = name.replace("attn" , "attention.self" )
if "norm1" in name:
UpperCAmelCase : int = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
UpperCAmelCase : List[str] = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
UpperCAmelCase : List[str] = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
UpperCAmelCase : Dict = name.replace("mlp.fc2" , "output.dense" )
if "class_embed" in name:
UpperCAmelCase : Any = name.replace("class_embed" , "class_labels_classifier" )
if "bbox_embed" in name:
UpperCAmelCase : Optional[int] = name.replace("bbox_embed" , "bbox_predictor" )
if "vit.norm" in name:
UpperCAmelCase : Tuple = name.replace("vit.norm" , "vit.layernorm" )
return name
def lowercase ( __magic_name__ , __magic_name__ ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
UpperCAmelCase : Optional[int] = orig_state_dict.pop(__magic_name__ )
if "qkv" in key:
UpperCAmelCase : str = key.split("." )
UpperCAmelCase : List[Any] = int(key_split[2] )
UpperCAmelCase : int = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
UpperCAmelCase : Optional[int] = val[:dim, :]
UpperCAmelCase : Union[str, Any] = val[
dim : dim * 2, :
]
UpperCAmelCase : Any = val[-dim:, :]
else:
UpperCAmelCase : Tuple = val[:dim]
UpperCAmelCase : List[str] = val[dim : dim * 2]
UpperCAmelCase : Any = val[-dim:]
else:
UpperCAmelCase : Union[str, Any] = val
return orig_state_dict
def lowercase ( ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCAmelCase : Tuple = Image.open(requests.get(__magic_name__ , stream=__magic_name__ ).raw )
return im
@torch.no_grad()
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = False ):
'''simple docstring'''
UpperCAmelCase : Tuple = get_yolos_config(__magic_name__ )
# load original state_dict
UpperCAmelCase : int = torch.load(__magic_name__ , map_location="cpu" )["model"]
# load 🤗 model
UpperCAmelCase : int = YolosForObjectDetection(__magic_name__ )
model.eval()
UpperCAmelCase : Dict = convert_state_dict(__magic_name__ , __magic_name__ )
model.load_state_dict(__magic_name__ )
# Check outputs on an image, prepared by YolosImageProcessor
UpperCAmelCase : Dict = 800 if yolos_name != "yolos_ti" else 512
UpperCAmelCase : int = YolosImageProcessor(format="coco_detection" , size=__magic_name__ )
UpperCAmelCase : List[Any] = image_processor(images=prepare_img() , return_tensors="pt" )
UpperCAmelCase : List[str] = model(**__magic_name__ )
UpperCAmelCase , UpperCAmelCase : Optional[int] = outputs.logits, outputs.pred_boxes
UpperCAmelCase , UpperCAmelCase : Optional[Any] = None, None
if yolos_name == "yolos_ti":
UpperCAmelCase : str = torch.tensor(
[[-3_9.5_0_2_2, -1_1.9_8_2_0, -1_7.6_8_8_8], [-2_9.9_5_7_4, -9.9_7_6_9, -1_7.7_6_9_1], [-4_2.3_2_8_1, -2_0.7_2_0_0, -3_0.6_2_9_4]] )
UpperCAmelCase : Tuple = torch.tensor(
[[0.4_0_2_1, 0.0_8_3_6, 0.7_9_7_9], [0.0_1_8_4, 0.2_6_0_9, 0.0_3_6_4], [0.1_7_8_1, 0.2_0_0_4, 0.2_0_9_5]] )
elif yolos_name == "yolos_s_200_pre":
UpperCAmelCase : Union[str, Any] = torch.tensor(
[[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] )
UpperCAmelCase : List[str] = torch.tensor(
[[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] )
elif yolos_name == "yolos_s_300_pre":
UpperCAmelCase : List[str] = torch.tensor(
[[-3_6.2_2_2_0, -1_4.4_3_8_5, -2_3.5_4_5_7], [-3_5.6_9_7_0, -1_4.7_5_8_3, -2_1.3_9_3_5], [-3_1.5_9_3_9, -1_3.6_0_4_2, -1_6.8_0_4_9]] )
UpperCAmelCase : Dict = torch.tensor(
[[0.7_6_1_4, 0.2_3_1_6, 0.4_7_2_8], [0.7_1_6_8, 0.4_4_9_5, 0.3_8_5_5], [0.4_9_9_6, 0.1_4_6_6, 0.9_9_9_6]] )
elif yolos_name == "yolos_s_dWr":
UpperCAmelCase : Dict = torch.tensor(
[[-4_2.8_6_6_8, -2_4.1_0_4_9, -4_1.1_6_9_0], [-3_4.7_4_5_6, -1_4.1_2_7_4, -2_4.9_1_9_4], [-3_3.7_8_9_8, -1_2.1_9_4_6, -2_5.6_4_9_5]] )
UpperCAmelCase : List[Any] = torch.tensor(
[[0.5_5_8_7, 0.2_7_7_3, 0.0_6_0_5], [0.5_0_0_4, 0.3_0_1_4, 0.9_9_9_4], [0.4_9_9_9, 0.1_5_4_8, 0.9_9_9_4]] )
elif yolos_name == "yolos_base":
UpperCAmelCase : str = torch.tensor(
[[-4_0.6_0_6_4, -2_4.3_0_8_4, -3_2.6_4_4_7], [-5_5.1_9_9_0, -3_0.7_7_1_9, -3_5.5_8_7_7], [-5_1.4_3_1_1, -3_3.3_5_0_7, -3_5.6_4_6_2]] )
UpperCAmelCase : Union[str, Any] = torch.tensor(
[[0.5_5_5_5, 0.2_7_9_4, 0.0_6_5_5], [0.9_0_4_9, 0.2_6_6_4, 0.1_8_9_4], [0.9_1_8_3, 0.1_9_8_4, 0.1_6_3_5]] )
else:
raise ValueError(F"Unknown yolos_name: {yolos_name}" )
assert torch.allclose(logits[0, :3, :3] , __magic_name__ , atol=1e-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , __magic_name__ , atol=1e-4 )
Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ )
print(F"Saving model {yolos_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(__magic_name__ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(__magic_name__ )
if push_to_hub:
UpperCAmelCase : int = {
"yolos_ti": "yolos-tiny",
"yolos_s_200_pre": "yolos-small",
"yolos_s_300_pre": "yolos-small-300",
"yolos_s_dWr": "yolos-small-dwr",
"yolos_base": "yolos-base",
}
print("Pushing to the hub..." )
UpperCAmelCase : Tuple = model_mapping[yolos_name]
image_processor.push_to_hub(__magic_name__ , organization="hustvl" )
model.push_to_hub(__magic_name__ , organization="hustvl" )
if __name__ == "__main__":
a : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--yolos_name",
default="yolos_s_200_pre",
type=str,
help=(
"Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
" 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
),
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
a : str = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 311
| 1
|
'''simple docstring'''
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
"kwargs, expected" , [
({"num_shards": 0, "max_num_jobs": 1}, []),
({"num_shards": 10, "max_num_jobs": 1}, [range(10 )]),
({"num_shards": 10, "max_num_jobs": 10}, [range(__magic_name__ , i + 1 ) for i in range(10 )]),
({"num_shards": 1, "max_num_jobs": 10}, [range(1 )]),
({"num_shards": 10, "max_num_jobs": 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({"num_shards": 3, "max_num_jobs": 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def lowercase ( __magic_name__ , __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : List[Any] = _distribute_shards(**__magic_name__ )
assert out == expected
@pytest.mark.parametrize(
"gen_kwargs, max_num_jobs, expected" , [
({"foo": 0}, 10, [{"foo": 0}]),
({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
] , )
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : str = _split_gen_kwargs(__magic_name__ , __magic_name__ )
assert out == expected
@pytest.mark.parametrize(
"gen_kwargs, expected" , [
({"foo": 0}, 1),
({"shards": [0]}, 1),
({"shards": [0, 1, 2, 3]}, 4),
({"shards": [0, 1, 2, 3], "foo": 0}, 4),
({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
] , )
def lowercase ( __magic_name__ , __magic_name__ ):
'''simple docstring'''
if expected is RuntimeError:
with pytest.raises(__magic_name__ ):
_number_of_shards_in_gen_kwargs(__magic_name__ )
else:
UpperCAmelCase : str = _number_of_shards_in_gen_kwargs(__magic_name__ )
assert out == expected
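# The parametrized cases above pin down the contract of `_distribute_shards`: shards are
# split into at most `max_num_jobs` contiguous ranges, earlier jobs absorb the remainder,
# and empty jobs are dropped. A minimal reference sketch inferred from those cases (an
# assumption, not the actual `datasets` implementation):
def distribute_shards_sketch(num_shards: int, max_num_jobs: int) -> list:
    # earlier jobs get one extra shard until the remainder is used up
    per_job = [num_shards // max_num_jobs + int(i < num_shards % max_num_jobs) for i in range(max_num_jobs)]
    out, start = [], 0
    for count in per_job:
        if count:  # skip empty jobs, matching e.g. num_shards=1, max_num_jobs=10 -> [range(1)]
            out.append(range(start, start + count))
            start += count
    return out

assert distribute_shards_sketch(10, 3) == [range(0, 4), range(4, 7), range(7, 10)]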
| 311
|
'''simple docstring'''
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
a : Tuple = logging.getLogger(__name__)
def lowercase ( ):
'''simple docstring'''
UpperCAmelCase : Any = argparse.ArgumentParser(
description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)." )
parser.add_argument("--file_path" , type=__magic_name__ , default="data/dump.txt" , help="The path to the data." )
parser.add_argument("--tokenizer_type" , type=__magic_name__ , default="bert" , choices=["bert", "roberta", "gpt2"] )
parser.add_argument("--tokenizer_name" , type=__magic_name__ , default="bert-base-uncased" , help="The tokenizer to use." )
parser.add_argument("--dump_file" , type=__magic_name__ , default="data/dump" , help="The dump file prefix." )
UpperCAmelCase : List[Any] = parser.parse_args()
logger.info(F"Loading Tokenizer ({args.tokenizer_name})" )
if args.tokenizer_type == "bert":
UpperCAmelCase : Any = BertTokenizer.from_pretrained(args.tokenizer_name )
UpperCAmelCase : Optional[int] = tokenizer.special_tokens_map["cls_token"] # `[CLS]`
UpperCAmelCase : Any = tokenizer.special_tokens_map["sep_token"] # `[SEP]`
elif args.tokenizer_type == "roberta":
UpperCAmelCase : List[Any] = RobertaTokenizer.from_pretrained(args.tokenizer_name )
UpperCAmelCase : Tuple = tokenizer.special_tokens_map["cls_token"] # `<s>`
UpperCAmelCase : Optional[int] = tokenizer.special_tokens_map["sep_token"] # `</s>`
elif args.tokenizer_type == "gpt2":
UpperCAmelCase : List[str] = GPTaTokenizer.from_pretrained(args.tokenizer_name )
UpperCAmelCase : Optional[Any] = tokenizer.special_tokens_map["bos_token"] # `<|endoftext|>`
UpperCAmelCase : List[Any] = tokenizer.special_tokens_map["eos_token"] # `<|endoftext|>`
logger.info(F"Loading text from {args.file_path}" )
with open(args.file_path , "r" , encoding="utf8" ) as fp:
UpperCAmelCase : str = fp.readlines()
logger.info("Start encoding" )
logger.info(F"{len(__magic_name__ )} examples to process." )
UpperCAmelCase : int = []
UpperCAmelCase : int = 0
UpperCAmelCase : Union[str, Any] = 1_0000
UpperCAmelCase : Union[str, Any] = time.time()
for text in data:
UpperCAmelCase : Dict = F"{bos} {text.strip()} {sep}"
UpperCAmelCase : Tuple = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
rslt.append(__magic_name__ )
iter += 1
if iter % interval == 0:
UpperCAmelCase : Dict = time.time()
logger.info(F"{iter} examples processed. - {(end-start):.2f}s/{interval}expl" )
UpperCAmelCase : Any = time.time()
logger.info("Finished binarization" )
logger.info(F"{len(__magic_name__ )} examples processed." )
UpperCAmelCase : str = F"{args.dump_file}.{args.tokenizer_name}.pickle"
UpperCAmelCase : List[str] = tokenizer.vocab_size
if vocab_size < (1 << 16):
UpperCAmelCase : int = [np.uintaa(__magic_name__ ) for d in rslt]
else:
UpperCAmelCase : int = [np.intaa(__magic_name__ ) for d in rslt]
random.shuffle(rslt_ )
logger.info(F"Dump to {dp_file}" )
with open(__magic_name__ , "wb" ) as handle:
pickle.dump(rslt_ , __magic_name__ , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
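# The dtype switch above keeps the pickled ids compact: token ids fit in np.uint16 only
# while vocab_size < 2**16 == 65536 (two bytes per id); larger vocabularies fall back to
# np.int32. E.g. bert-base-uncased's ~30k-entry vocab takes the uint16 branch.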
| 311
| 1
|
'''simple docstring'''
import string
def lowercase ( __magic_name__ ):
'''simple docstring'''
for key in range(len(string.ascii_uppercase ) ):
UpperCAmelCase : List[Any] = ""
for symbol in message:
if symbol in string.ascii_uppercase:
UpperCAmelCase : str = string.ascii_uppercase.find(__magic_name__ )
UpperCAmelCase : Dict = num - key
if num < 0:
UpperCAmelCase : List[Any] = num + len(string.ascii_uppercase )
UpperCAmelCase : Any = translated + string.ascii_uppercase[num]
else:
UpperCAmelCase : Optional[int] = translated + symbol
print(F"Decryption using Key #{key}: {translated}" )
def lowercase ( ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = input("Encrypted message: " )
UpperCAmelCase : Optional[Any] = message.upper()
decrypt(__magic_name__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
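# Worked example of the brute force above (illustrative): "KHOOR" is "HELLO" shifted
# forward by 3, so among the 26 printed candidates, key 3 recovers the plaintext:
#     Decryption using Key #3: HELLO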
| 311
|
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
a : Tuple = ["gpt2"]
a : Dict = "gpt2"
if is_tf_available():
class UpperCamelCase__ ( tf.Module ):
"""simple docstring"""
def __init__( self , snake_case ):
'''simple docstring'''
super().__init__()
UpperCAmelCase : Tuple = tokenizer
UpperCAmelCase : List[str] = AutoConfig.from_pretrained(snake_case )
UpperCAmelCase : int = TFGPTaLMHeadModel.from_config(snake_case )
@tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name="text" ),) )
def A_ ( self , snake_case ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = self.tokenizer(snake_case )
UpperCAmelCase : Optional[int] = tokenized["input_ids"].to_tensor()
UpperCAmelCase : Optional[int] = tf.cast(input_ids_dense > 0 , tf.intaa )
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
UpperCAmelCase : List[Any] = self.model(input_ids=snake_case , attention_mask=snake_case )["logits"]
return outputs
@require_tf
@require_keras_nlp
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self ):
'''simple docstring'''
super().setUp()
UpperCAmelCase : Any = [GPTaTokenizer.from_pretrained(snake_case ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
UpperCAmelCase : Optional[Any] = [TFGPTaTokenizer.from_pretrained(snake_case ) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
UpperCAmelCase : Tuple = [
"This is a straightforward English test sentence.",
"This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
"Now we're going to add some Chinese: 一 二 三 一二三",
"And some much more rare Chinese: 齉 堃 齉堃",
"Je vais aussi écrire en français pour tester les accents",
"Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
]
UpperCAmelCase : Optional[Any] = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def A_ ( self ):
'''simple docstring'''
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in self.test_sentences:
UpperCAmelCase : List[Any] = tokenizer([test_inputs] , return_tensors="tf" )
UpperCAmelCase : Any = tf_tokenizer([test_inputs] )
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
UpperCAmelCase : Dict = python_outputs[key].numpy()
UpperCAmelCase : List[str] = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
self.assertTrue(tf.reduce_all(tf.cast(snake_case , tf.intaa ) == tf_outputs_values ) )
@slow
def A_ ( self ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
UpperCAmelCase : Optional[Any] = tf.function(snake_case )
for test_inputs in self.test_sentences:
UpperCAmelCase : List[str] = tf.constant(snake_case )
UpperCAmelCase : Dict = compiled_tokenizer(snake_case )
UpperCAmelCase : Union[str, Any] = tf_tokenizer(snake_case )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def A_ ( self ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
UpperCAmelCase : int = ModelToSave(tokenizer=snake_case )
UpperCAmelCase : Tuple = tf.convert_to_tensor([self.test_sentences[0]] )
UpperCAmelCase : str = model.serving(snake_case ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
UpperCAmelCase : Optional[int] = Path(snake_case ) / "saved.model"
tf.saved_model.save(snake_case , snake_case , signatures={"serving_default": model.serving} )
UpperCAmelCase : int = tf.saved_model.load(snake_case )
UpperCAmelCase : str = loaded_model.signatures["serving_default"](snake_case )["output_0"]
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
def A_ ( self ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
UpperCAmelCase : Any = tf.convert_to_tensor([self.test_sentences[0]] )
UpperCAmelCase : Tuple = tf_tokenizer(snake_case ) # Build model with some sample inputs
UpperCAmelCase : Union[str, Any] = tf_tokenizer.get_config()
UpperCAmelCase : str = TFGPTaTokenizer.from_config(snake_case )
UpperCAmelCase : Tuple = model_from_config(snake_case )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
def A_ ( self ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
UpperCAmelCase : List[str] = 1_2_3_1_2_3
for max_length in [3, 5, 1_0_2_4]:
UpperCAmelCase : Any = tf.convert_to_tensor([self.test_sentences[0]] )
UpperCAmelCase : Tuple = tf_tokenizer(snake_case , max_length=snake_case )
UpperCAmelCase : Union[str, Any] = out["input_ids"].numpy().shape[1]
assert out_length == max_length
| 311
| 1
|
'''simple docstring'''
import os
from typing import Dict, List, Tuple, TypeVar, Union
a : Any = TypeVar("T")
a : Dict = Union[List[T], Tuple[T, ...]]
a : Optional[int] = Union[T, List[T], Dict[str, T]]
a : Union[str, Any] = Union[str, bytes, os.PathLike]
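# Illustrative sketch (assumed usage, not from the library) of how these aliases compose:
# NestedDataStructureLike[T] accepts a bare value, a list/tuple of values, or a str-keyed
# dict of values, while PathLike covers str/bytes/os.PathLike paths.
def flatten_paths(paths: NestedDataStructureLike[PathLike]) -> List[str]:
    if isinstance(paths, dict):
        values = list(paths.values())
    elif isinstance(paths, (list, tuple)):
        values = list(paths)
    else:
        values = [paths]
    return [str(v) for v in values]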
| 311
|
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
a : str = "docs/source/en/_toctree.yml"
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Dict = defaultdict(__magic_name__ )
for doc in model_doc:
counts[doc["local"]] += 1
UpperCAmelCase : List[Any] = [key for key, value in counts.items() if value > 1]
UpperCAmelCase : Dict = []
for duplicate_key in duplicates:
UpperCAmelCase : Union[str, Any] = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key} )
if len(__magic_name__ ) > 1:
raise ValueError(
F"{duplicate_key} is present several times in the documentation table of content at "
"`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
"others." )
# Only add this once
new_doc.append({"local": duplicate_key, "title": titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1] )
# Sort
    return sorted(__magic_name__ , key=lambda s : s["title"].lower() )
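# Hand-worked illustration of the dedup above (hypothetical entries): two docs sharing the
# same `local` and title collapse into one, and the result is re-sorted by lowercased title:
#   [{"local": "model_doc/bert", "title": "BERT"}, {"local": "model_doc/bert", "title": "BERT"},
#    {"local": "model_doc/albert", "title": "ALBERT"}]
#   -> [{"local": "model_doc/albert", "title": "ALBERT"}, {"local": "model_doc/bert", "title": "BERT"}]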
def lowercase ( __magic_name__=False ):
'''simple docstring'''
with open(__magic_name__ , encoding="utf-8" ) as f:
UpperCAmelCase : Any = yaml.safe_load(f.read() )
# Get to the API doc
UpperCAmelCase : Optional[int] = 0
while content[api_idx]["title"] != "API":
api_idx += 1
UpperCAmelCase : Union[str, Any] = content[api_idx]["sections"]
# Then to the model doc
UpperCAmelCase : Any = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
UpperCAmelCase : str = api_doc[model_idx]["sections"]
UpperCAmelCase : Any = [(idx, section) for idx, section in enumerate(__magic_name__ ) if "sections" in section]
UpperCAmelCase : Optional[int] = False
for idx, modality_doc in modalities_docs:
UpperCAmelCase : int = modality_doc["sections"]
UpperCAmelCase : int = clean_model_doc_toc(__magic_name__ )
if old_modality_doc != new_modality_doc:
UpperCAmelCase : int = True
if overwrite:
UpperCAmelCase : Dict = new_modality_doc
if diff:
if overwrite:
UpperCAmelCase : Any = model_doc
UpperCAmelCase : Any = api_doc
with open(__magic_name__ , "w" , encoding="utf-8" ) as f:
f.write(yaml.dump(__magic_name__ , allow_unicode=__magic_name__ ) )
else:
raise ValueError(
"The model doc part of the table of content is not properly sorted, run `make style` to fix this." )
if __name__ == "__main__":
a : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
a : Optional[Any] = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
| 311
| 1
|
'''simple docstring'''
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
a : Union[str, Any] = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n"
a : Optional[Any] = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n"
a : str = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n 'bleu': bleu score,\n 'precisions': geometric mean of n-gram precisions,\n 'brevity_penalty': brevity penalty,\n 'length_ratio': ratio of lengths,\n 'translation_length': translation_length,\n 'reference_length': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase__ ( datasets.Metric ):
"""simple docstring"""
def A_ ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"] , reference_urls=[
"https://en.wikipedia.org/wiki/BLEU",
"https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
] , )
def A_ ( self , snake_case , snake_case , snake_case=4 , snake_case=False ):
'''simple docstring'''
UpperCAmelCase : Any = compute_bleu(
reference_corpus=snake_case , translation_corpus=snake_case , max_order=snake_case , smooth=snake_case )
((UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase)) : Optional[Any] = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
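# The brevity penalty reported above follows the standard BLEU definition: with candidate
# length c and reference length r, bp = 1 if c > r else exp(1 - r / c). A standalone sketch
# (assumed to match `compute_bleu`'s convention):
import math

def brevity_penalty_sketch(translation_length: int, reference_length: int) -> float:
    if translation_length > reference_length:
        return 1.0
    return math.exp(1 - reference_length / translation_length)

assert brevity_penalty_sketch(10, 10) == 1.0  # equal lengths incur no penalty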
| 311
|
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def lowercase ( __magic_name__ ):
'''simple docstring'''
for param in module.parameters():
UpperCAmelCase : Any = False
def lowercase ( ):
'''simple docstring'''
UpperCAmelCase : int = "cuda" if torch.cuda.is_available() else "cpu"
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
UpperCAmelCase : int = "mps"
if device == "mps":
print(
"WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
" errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
" with generations." )
return device
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : str = plt.imshow(__magic_name__ )
fig.axes.get_xaxis().set_visible(__magic_name__ )
fig.axes.get_yaxis().set_visible(__magic_name__ )
plt.show()
def lowercase ( ):
'''simple docstring'''
UpperCAmelCase : str = datetime.now()
UpperCAmelCase : Tuple = current_time.strftime("%H:%M:%S" )
return timestamp
| 311
| 1
|
'''simple docstring'''
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/unet/hor128", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True)
def lowercase ( __magic_name__ ):
'''simple docstring'''
if hor == 128:
UpperCAmelCase : Optional[int] = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
UpperCAmelCase : Dict = (32, 128, 256)
UpperCAmelCase : Any = ("UpResnetBlock1D", "UpResnetBlock1D")
elif hor == 32:
UpperCAmelCase : int = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
UpperCAmelCase : List[str] = (32, 64, 128, 256)
UpperCAmelCase : Optional[int] = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
UpperCAmelCase : List[Any] = torch.load(F"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch" )
UpperCAmelCase : str = model.state_dict()
UpperCAmelCase : Optional[Any] = {
"down_block_types": down_block_types,
"block_out_channels": block_out_channels,
"up_block_types": up_block_types,
"layers_per_block": 1,
"use_timestep_embedding": True,
"out_block_type": "OutConv1DBlock",
"norm_num_groups": 8,
"downsample_each_block": False,
"in_channels": 14,
"out_channels": 14,
"extra_in_channels": 0,
"time_embedding_type": "positional",
"flip_sin_to_cos": False,
"freq_shift": 1,
"sample_size": 6_5536,
"mid_block_type": "MidResTemporalBlock1D",
"act_fn": "mish",
}
UpperCAmelCase : Union[str, Any] = UNetaDModel(**__magic_name__ )
print(F"length of state dict: {len(state_dict.keys() )}" )
print(F"length of value function dict: {len(hf_value_function.state_dict().keys() )}" )
UpperCAmelCase : Optional[int] = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
UpperCAmelCase : Dict = state_dict.pop(__magic_name__ )
hf_value_function.load_state_dict(__magic_name__ )
torch.save(hf_value_function.state_dict() , F"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin" )
with open(F"hub/hopper-medium-v2/unet/hor{hor}/config.json" , "w" ) as f:
json.dump(__magic_name__ , __magic_name__ )
def lowercase ( ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = {
"in_channels": 14,
"down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
"up_block_types": (),
"out_block_type": "ValueFunction",
"mid_block_type": "ValueFunctionMidBlock1D",
"block_out_channels": (32, 64, 128, 256),
"layers_per_block": 1,
"downsample_each_block": True,
"sample_size": 6_5536,
"out_channels": 14,
"extra_in_channels": 0,
"time_embedding_type": "positional",
"use_timestep_embedding": True,
"flip_sin_to_cos": False,
"freq_shift": 1,
"norm_num_groups": 8,
"act_fn": "mish",
}
UpperCAmelCase : Tuple = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch" )
UpperCAmelCase : int = model
UpperCAmelCase : List[str] = UNetaDModel(**__magic_name__ )
print(F"length of state dict: {len(state_dict.keys() )}" )
print(F"length of value function dict: {len(hf_value_function.state_dict().keys() )}" )
UpperCAmelCase : List[str] = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
UpperCAmelCase : Tuple = state_dict.pop(__magic_name__ )
hf_value_function.load_state_dict(__magic_name__ )
torch.save(hf_value_function.state_dict() , "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin" )
with open("hub/hopper-medium-v2/value_function/config.json" , "w" ) as f:
json.dump(__magic_name__ , __magic_name__ )
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
| 311
|
'''simple docstring'''
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
a : str = getLogger(__name__)
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = 8 , __magic_name__ = 1024 , __magic_name__="val" , __magic_name__=None , __magic_name__=False , __magic_name__="summarization" , __magic_name__=None , __magic_name__=1 , __magic_name__ = None , __magic_name__="" , **__magic_name__ , ):
'''simple docstring'''
UpperCAmelCase : List[Any] = str(__magic_name__ )
assert local_rank is not None
torch.distributed.init_process_group(backend="nccl" , rank=__magic_name__ )
UpperCAmelCase : List[str] = Path(__magic_name__ )
UpperCAmelCase : Dict = save_dir.joinpath(F"rank_{local_rank}_output.json" )
torch.cuda.set_device(__magic_name__ )
UpperCAmelCase : Optional[Any] = AutoModelForSeqaSeqLM.from_pretrained(__magic_name__ ).cuda()
if fpaa:
UpperCAmelCase : int = model.half()
# determine if we need to increase num_beams
use_task_specific_params(__magic_name__ , __magic_name__ ) # update config with task specific params
UpperCAmelCase : Dict = generate_kwargs.pop("num_beams" , model.config.num_beams ) # AttributeError risk?
if num_return_sequences > num_beams:
UpperCAmelCase : Optional[Any] = num_return_sequences
UpperCAmelCase : Dict = AutoTokenizer.from_pretrained(__magic_name__ )
logger.info(F"Inferred tokenizer type: {tokenizer.__class__}" ) # if this is wrong, check config.model_type.
if max_source_length is None:
UpperCAmelCase : Any = tokenizer.model_max_length
if prefix is None:
UpperCAmelCase : Tuple = prefix or getattr(model.config , "prefix" , "" ) or ""
UpperCAmelCase : Dict = SeqaSeqDataset(
__magic_name__ , __magic_name__ , __magic_name__ , max_target_length=1024 , type_path=__magic_name__ , n_obs=__magic_name__ , prefix=__magic_name__ , **__magic_name__ , )
# I set shuffle=True for a more accurate progress bar.
# If all the longest samples are first, the prog bar estimate is too high at the beginning.
UpperCAmelCase : int = ds.make_sortish_sampler(__magic_name__ , distributed=__magic_name__ , add_extra_examples=__magic_name__ , shuffle=__magic_name__ )
UpperCAmelCase : List[Any] = DataLoader(__magic_name__ , sampler=__magic_name__ , batch_size=__magic_name__ , collate_fn=ds.collate_fn )
UpperCAmelCase : Any = []
for batch in tqdm(__magic_name__ ):
UpperCAmelCase : List[Any] = model.generate(
input_ids=batch["input_ids"].to(model.device ) , attention_mask=batch["attention_mask"].to(model.device ) , num_return_sequences=__magic_name__ , num_beams=__magic_name__ , **__magic_name__ , )
UpperCAmelCase : Optional[int] = tokenizer.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ , clean_up_tokenization_spaces=__magic_name__ )
UpperCAmelCase : int = batch["ids"]
if num_return_sequences > 1:
UpperCAmelCase : List[Any] = chunks(__magic_name__ , __magic_name__ ) # batch size chunks, each of size num_return_seq
for i, pred in enumerate(__magic_name__ ):
results.append({"pred": pred, "id": ids[i].item()} )
save_json(__magic_name__ , __magic_name__ )
return results, sampler.num_replicas
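# When num_return_sequences > 1, `generate` returns bs * num_return_sequences decoded
# strings and `chunks` regroups them per input example. Minimal sketch of that regrouping
# (assuming `chunks(lst, n)` yields consecutive length-n slices, as the inline comment
# above implies):
def chunks_sketch(lst, n):
    for i in range(0, len(lst), n):
        yield lst[i : i + n]

assert list(chunks_sketch(["a1", "a2", "b1", "b2"], 2)) == [["a1", "a2"], ["b1", "b2"]]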
def lowercase ( ):
'''simple docstring'''
UpperCAmelCase : str = argparse.ArgumentParser(
epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate" )
parser.add_argument("--data_dir" , type=__magic_name__ , help="like cnn_dm/test.source" )
parser.add_argument(
"--model_name" , type=__magic_name__ , help="like facebook/bart-large-cnn,t5-base, etc." , default="sshleifer/distilbart-xsum-12-3" , )
parser.add_argument("--save_dir" , type=__magic_name__ , help="where to save" , default="tmp_gen" )
parser.add_argument("--max_source_length" , type=__magic_name__ , default=__magic_name__ )
parser.add_argument(
"--type_path" , type=__magic_name__ , default="test" , help="which subset to evaluate typically train/val/test" )
parser.add_argument("--task" , type=__magic_name__ , default="summarization" , help="used for task_specific_params + metrics" )
parser.add_argument("--bs" , type=__magic_name__ , default=8 , required=__magic_name__ , help="batch size" )
parser.add_argument(
"--local_rank" , type=__magic_name__ , default=-1 , required=__magic_name__ , help="should be passed by distributed.launch" )
parser.add_argument(
"--n_obs" , type=__magic_name__ , default=__magic_name__ , required=__magic_name__ , help="How many observations. Defaults to all." )
parser.add_argument(
"--num_return_sequences" , type=__magic_name__ , default=1 , required=__magic_name__ , help="How many sequences to return" )
parser.add_argument(
"--sync_timeout" , type=__magic_name__ , default=600 , required=__magic_name__ , help="How long should master process wait for other processes to finish." , )
parser.add_argument("--src_lang" , type=__magic_name__ , default=__magic_name__ , required=__magic_name__ )
parser.add_argument("--tgt_lang" , type=__magic_name__ , default=__magic_name__ , required=__magic_name__ )
parser.add_argument(
"--prefix" , type=__magic_name__ , required=__magic_name__ , default=__magic_name__ , help="will be added to the begininng of src examples" )
parser.add_argument("--fp16" , action="store_true" )
parser.add_argument("--debug" , action="store_true" )
UpperCAmelCase : Union[str, Any] = time.time()
UpperCAmelCase , UpperCAmelCase : Dict = parser.parse_known_args()
UpperCAmelCase : Tuple = parse_numeric_n_bool_cl_kwargs(__magic_name__ )
if generate_kwargs and args.local_rank <= 0:
print(F"parsed the following generate kwargs: {generate_kwargs}" )
UpperCAmelCase : Union[str, Any] = Path(args.save_dir + "_tmp" )
Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ ) # this handles locking.
UpperCAmelCase : List[Any] = list(json_save_dir.glob("rank_*.json" ) )
if intermediate_files:
raise ValueError(F"Found files at {json_save_dir} please move or remove them." )
# In theory, a node could finish and save before another node hits this. If this happens, we can address later.
UpperCAmelCase : Optional[Any] = {}
if args.src_lang is not None:
UpperCAmelCase : List[str] = args.src_lang
if args.tgt_lang is not None:
UpperCAmelCase : Dict = args.tgt_lang
Path(args.save_dir ).mkdir(exist_ok=__magic_name__ )
UpperCAmelCase , UpperCAmelCase : str = eval_data_dir(
args.data_dir , __magic_name__ , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=__magic_name__ , **__magic_name__ , )
if args.local_rank <= 0:
UpperCAmelCase : List[str] = Path(args.save_dir )
save_dir.mkdir(exist_ok=__magic_name__ )
UpperCAmelCase : str = gather_results_from_each_node(__magic_name__ , __magic_name__ , args.sync_timeout )
UpperCAmelCase : Dict = combine_partial_results(__magic_name__ )
if args.num_return_sequences > 1:
UpperCAmelCase : int = save_dir.joinpath("pseudolabel_results.json" )
print(F"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/" )
save_json(__magic_name__ , __magic_name__ )
return
UpperCAmelCase : Dict = Path(args.data_dir ).joinpath(args.type_path + ".target" )
with open(__magic_name__ ) as f:
UpperCAmelCase : Dict = [x.rstrip() for x in f.readlines()][: len(__magic_name__ )]
# Calculate metrics, save metrics, and save _generations.txt
UpperCAmelCase : Optional[int] = "translation" in args.task
UpperCAmelCase : str = calculate_bleu if calc_bleu else calculate_rouge
UpperCAmelCase : Tuple = "bleu" if calc_bleu else "rouge"
UpperCAmelCase : Dict = score_fn(__magic_name__ , __magic_name__ )
UpperCAmelCase : Any = len(__magic_name__ )
UpperCAmelCase : Union[str, Any] = time.time() - start_time
UpperCAmelCase : Dict = round(runtime / metrics["n_obs"] , 4 )
UpperCAmelCase : Optional[Any] = num_replicas
# TODO(@stas00): add whatever metadata to metrics
UpperCAmelCase : Dict = save_dir.joinpath(F"{args.type_path}_{metric_name}.json" )
save_json(__magic_name__ , __magic_name__ , indent=__magic_name__ )
print(__magic_name__ )
write_txt_file(__magic_name__ , save_dir.joinpath(F"{args.type_path}_generations.txt" ) )
if args.debug:
write_txt_file(__magic_name__ , save_dir.joinpath(F"{args.type_path}.target" ) )
else:
shutil.rmtree(__magic_name__ )
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Tuple = []
for partial_result in partial_results:
records.extend(__magic_name__ )
    UpperCAmelCase : Optional[Any] = sorted(__magic_name__ , key=lambda x : x["id"] )
UpperCAmelCase : List[Any] = [x["pred"] for x in records]
return preds
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Dict = time.time()
logger.info("waiting for all nodes to finish" )
UpperCAmelCase : Union[str, Any] = None
while (time.time() - start_wait) < timeout:
UpperCAmelCase : Dict = list(save_dir.glob("rank_*.json" ) )
if len(__magic_name__ ) < num_replicas:
continue
try:
# make sure all json files are fully saved
UpperCAmelCase : List[str] = lmap(__magic_name__ , __magic_name__ )
return json_data
except JSONDecodeError:
continue
else:
raise TimeoutError("Rank 0 gave up on waiting for other processes" )
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
| 311
| 1
|
'''simple docstring'''
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
a : int = get_logger(__name__)
class UpperCamelCase__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = "dummy_data"
SCREAMING_SNAKE_CASE__ : Optional[int] = "datasets"
SCREAMING_SNAKE_CASE__ : Dict = False
def __init__( self , snake_case , snake_case , snake_case , snake_case = None , snake_case = False , snake_case = True , snake_case = None , ):
'''simple docstring'''
UpperCAmelCase : int = 0
UpperCAmelCase : List[str] = dataset_name
UpperCAmelCase : Optional[Any] = cache_dir
UpperCAmelCase : List[Any] = use_local_dummy_data
UpperCAmelCase : Tuple = config
# download_callbacks take a single url as input
UpperCAmelCase : List[Callable] = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
UpperCAmelCase : Tuple = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
UpperCAmelCase : Tuple = str(snake_case )
# to be downloaded
UpperCAmelCase : Optional[int] = None
UpperCAmelCase : Dict = None
@property
def A_ ( self ):
'''simple docstring'''
if self._dummy_file is None:
UpperCAmelCase : Dict = self.download_dummy_data()
return self._dummy_file
@property
def A_ ( self ):
'''simple docstring'''
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join("dummy" , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join("dummy" , self.version_name )
@property
def A_ ( self ):
'''simple docstring'''
return os.path.join(self.dummy_data_folder , "dummy_data.zip" )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Any = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
UpperCAmelCase : Optional[Any] = cached_path(
snake_case , cache_dir=self.cache_dir , extract_compressed_file=snake_case , force_extract=snake_case )
return os.path.join(snake_case , self.dummy_file_name )
@property
def A_ ( self ):
'''simple docstring'''
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def A_ ( self ):
'''simple docstring'''
if self._bucket_url is None:
UpperCAmelCase : Union[str, Any] = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , "/" ) )
return self._bucket_url
@property
def A_ ( self ):
'''simple docstring'''
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , "/" ).split("/" )[:-1] )
def A_ ( self , snake_case , *snake_case ):
'''simple docstring'''
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
UpperCAmelCase : Optional[Any] = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
UpperCAmelCase : Dict = self.dummy_file_name
# special case when data_url is a dict
if isinstance(snake_case , snake_case ):
return self.create_dummy_data_dict(snake_case , snake_case )
elif isinstance(snake_case , (list, tuple) ):
return self.create_dummy_data_list(snake_case , snake_case )
else:
return self.create_dummy_data_single(snake_case , snake_case )
def A_ ( self , snake_case , *snake_case ):
'''simple docstring'''
return self.download_and_extract(snake_case )
def A_ ( self , snake_case , snake_case ):
'''simple docstring'''
return self.download_and_extract(snake_case )
def A_ ( self , snake_case , *snake_case , **snake_case ):
'''simple docstring'''
return path
def A_ ( self ):
'''simple docstring'''
return {}
def A_ ( self , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : int = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(snake_case , snake_case ):
for single_url in single_urls:
download_callback(snake_case )
else:
UpperCAmelCase : str = single_urls
download_callback(snake_case )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(snake_case , snake_case ):
                UpperCAmelCase : List[Any] = [os.path.join(snake_case , urllib.parse.quote_plus(Path(x ).name ) ) for x in single_urls]
else:
UpperCAmelCase : Any = single_urls
UpperCAmelCase : int = os.path.join(snake_case , urllib.parse.quote_plus(Path(snake_case ).name ) )
UpperCAmelCase : Optional[Any] = value
# make sure that values are unique
if all(isinstance(snake_case , snake_case ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
UpperCAmelCase : Any = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
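    # Hand-worked illustration of the uniquification above (hypothetical filenames): when two
    # urls map onto the same dummy filename, the dict key is appended to disambiguate:
    #   {"train": "dummy/data.csv", "test": "dummy/data.csv"}
    #   -> {"train": "dummy/data.csvtrain", "test": "dummy/data.csvtest"}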
def A_ ( self , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        UpperCAmelCase : List[Any] = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" , url ) ) for url in data_url )
UpperCAmelCase : Tuple = all(
url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
UpperCAmelCase : Union[str, Any] = [data_url[0]] * len(snake_case )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(snake_case )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
UpperCAmelCase : Tuple = os.path.join(snake_case , urllib.parse.quote_plus(single_url.split("/" )[-1] ) )
dummy_data_list.append(snake_case )
return dummy_data_list
def A_ ( self , snake_case , snake_case ):
'''simple docstring'''
for download_callback in self.download_callbacks:
download_callback(snake_case )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
UpperCAmelCase : List[str] = os.path.join(snake_case , urllib.parse.quote_plus(data_url.split("/" )[-1] ) )
if os.path.exists(snake_case ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def A_ ( self ):
'''simple docstring'''
pass
def A_ ( self ):
'''simple docstring'''
pass
def A_ ( self , snake_case ):
'''simple docstring'''
def _iter_archive_members(snake_case ):
# this preserves the order of the members inside the ZIP archive
UpperCAmelCase : Any = Path(self.dummy_file ).parent
UpperCAmelCase : List[Any] = path.relative_to(snake_case )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
UpperCAmelCase : str = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(snake_case )
UpperCAmelCase : Optional[int] = Path(snake_case )
UpperCAmelCase : Union[str, Any] = _iter_archive_members(snake_case ) if self.use_local_dummy_data else path.rglob("*" )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith((".", "__") ):
yield file_path.relative_to(snake_case ).as_posix(), file_path.open("rb" )
def A_ ( self , snake_case ):
'''simple docstring'''
if not isinstance(snake_case , snake_case ):
UpperCAmelCase : List[str] = [paths]
for path in paths:
if os.path.isfile(snake_case ):
if os.path.basename(snake_case ).startswith((".", "__") ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(snake_case ):
if os.path.basename(snake_case ).startswith((".", "__") ):
continue
dirnames.sort()
for filename in sorted(snake_case ):
if filename.startswith((".", "__") ):
continue
yield os.path.join(snake_case , snake_case )
| 311
|
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
a : List[str] = logging.get_logger(__name__)
a : Optional[Any] = ["model.decoder.embed_positions.weights"]
def lowercase ( __magic_name__ ):
'''simple docstring'''
if "emb" in name:
UpperCAmelCase : str = name.replace("emb" , "model.decoder.embed_tokens" )
if "transformer" in name:
UpperCAmelCase : List[str] = name.replace("transformer" , "model.decoder" )
if "cross_attention" in name:
UpperCAmelCase : int = name.replace("cross_attention" , "encoder_attn" )
if "linear1" in name:
UpperCAmelCase : List[Any] = name.replace("linear1" , "fc1" )
if "linear2" in name:
UpperCAmelCase : int = name.replace("linear2" , "fc2" )
if "norm1" in name:
UpperCAmelCase : Dict = name.replace("norm1" , "self_attn_layer_norm" )
if "norm_cross" in name:
UpperCAmelCase : Any = name.replace("norm_cross" , "encoder_attn_layer_norm" )
if "norm2" in name:
UpperCAmelCase : Union[str, Any] = name.replace("norm2" , "final_layer_norm" )
if "out_norm" in name:
UpperCAmelCase : Dict = name.replace("out_norm" , "model.decoder.layer_norm" )
if "linears" in name:
UpperCAmelCase : List[Any] = name.replace("linears" , "lm_heads" )
if "condition_provider.conditioners.description.output_proj" in name:
UpperCAmelCase : Any = name.replace("condition_provider.conditioners.description.output_proj" , "enc_to_dec_proj" )
return name
def lowercase ( __magic_name__ , __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Any = list(state_dict.keys() )
UpperCAmelCase : List[Any] = {}
for key in keys:
UpperCAmelCase : Any = state_dict.pop(__magic_name__ )
UpperCAmelCase : str = rename_keys(__magic_name__ )
if "in_proj_weight" in key:
# split fused qkv proj
UpperCAmelCase : Optional[int] = val[:hidden_size, :]
UpperCAmelCase : Optional[Any] = val[hidden_size : 2 * hidden_size, :]
UpperCAmelCase : Optional[Any] = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
UpperCAmelCase : str = val
else:
UpperCAmelCase : int = val
return state_dict, enc_dec_proj_state_dict
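# The in_proj split above relies on fairseq fusing q/k/v along dim 0 of a single
# (3 * hidden_size, hidden_size) matrix, so slicing thirds recovers the separate
# projections. Standalone check of that slicing (illustrative values, hidden_size = 2):
_fused = torch.arange(12, dtype=torch.float32).reshape(6, 2)
_q, _k, _v = _fused[:2, :], _fused[2:4, :], _fused[-2:, :]
assert _q.shape == _k.shape == _v.shape == (2, 2)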
def lowercase ( __magic_name__ ):
'''simple docstring'''
if checkpoint == "small":
# default config values
UpperCAmelCase : List[Any] = 1024
UpperCAmelCase : Tuple = 24
UpperCAmelCase : Union[str, Any] = 16
elif checkpoint == "medium":
UpperCAmelCase : List[Any] = 1536
UpperCAmelCase : Optional[Any] = 48
UpperCAmelCase : List[str] = 24
elif checkpoint == "large":
UpperCAmelCase : List[Any] = 2048
UpperCAmelCase : str = 48
UpperCAmelCase : Optional[Any] = 32
else:
raise ValueError(F"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}." )
UpperCAmelCase : Tuple = MusicgenDecoderConfig(
hidden_size=__magic_name__ , ffn_dim=hidden_size * 4 , num_hidden_layers=__magic_name__ , num_attention_heads=__magic_name__ , )
return config
@torch.no_grad()
def lowercase ( __magic_name__ , __magic_name__=None , __magic_name__=None , __magic_name__="cpu" ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = MusicGen.get_pretrained(__magic_name__ , device=__magic_name__ )
UpperCAmelCase : List[str] = decoder_config_from_checkpoint(__magic_name__ )
UpperCAmelCase : Dict = fairseq_model.lm.state_dict()
UpperCAmelCase , UpperCAmelCase : List[str] = rename_state_dict(
__magic_name__ , hidden_size=decoder_config.hidden_size )
UpperCAmelCase : Any = TaEncoderModel.from_pretrained("t5-base" )
UpperCAmelCase : Any = EncodecModel.from_pretrained("facebook/encodec_32khz" )
UpperCAmelCase : int = MusicgenForCausalLM(__magic_name__ ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
UpperCAmelCase , UpperCAmelCase : Optional[int] = decoder.load_state_dict(__magic_name__ , strict=__magic_name__ )
for key in missing_keys.copy():
if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(__magic_name__ )
if len(__magic_name__ ) > 0:
raise ValueError(F"Missing key(s) in state_dict: {missing_keys}" )
if len(__magic_name__ ) > 0:
raise ValueError(F"Unexpected key(s) in state_dict: {unexpected_keys}" )
# init the composite model
UpperCAmelCase : List[Any] = MusicgenForConditionalGeneration(text_encoder=__magic_name__ , audio_encoder=__magic_name__ , decoder=__magic_name__ )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(__magic_name__ )
# check we can do a forward pass
UpperCAmelCase : Union[str, Any] = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
UpperCAmelCase : Optional[Any] = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
UpperCAmelCase : str = model(input_ids=__magic_name__ , decoder_input_ids=__magic_name__ ).logits
if logits.shape != (8, 1, 2048):
raise ValueError("Incorrect shape for logits" )
# now construct the processor
UpperCAmelCase : Dict = AutoTokenizer.from_pretrained("t5-base" )
UpperCAmelCase : List[str] = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" , padding_side="left" )
UpperCAmelCase : Dict = MusicgenProcessor(feature_extractor=__magic_name__ , tokenizer=__magic_name__ )
# set the appropriate bos/pad token ids
UpperCAmelCase : List[Any] = 2048
UpperCAmelCase : Tuple = 2048
# set other default generation config params
UpperCAmelCase : Tuple = int(30 * audio_encoder.config.frame_rate )
UpperCAmelCase : str = True
UpperCAmelCase : Tuple = 3.0
if pytorch_dump_folder is not None:
Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ )
logger.info(F"Saving model {checkpoint} to {pytorch_dump_folder}" )
model.save_pretrained(__magic_name__ )
processor.save_pretrained(__magic_name__ )
if repo_id:
logger.info(F"Pushing model {checkpoint} to {repo_id}" )
model.push_to_hub(__magic_name__ )
processor.push_to_hub(__magic_name__ )
if __name__ == "__main__":
a : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
a : int = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 311
| 1
|
'''simple docstring'''
def lowercase ( __magic_name__ , __magic_name__ = False ):
'''simple docstring'''
if not isinstance(__magic_name__ , __magic_name__ ):
UpperCAmelCase : List[str] = F"Expected string as input, found {type(__magic_name__ )}"
raise ValueError(__magic_name__ )
if not isinstance(__magic_name__ , __magic_name__ ):
UpperCAmelCase : Union[str, Any] = F"Expected boolean as use_pascal parameter, found {type(__magic_name__ )}"
raise ValueError(__magic_name__ )
UpperCAmelCase : List[Any] = input_str.split("_" )
UpperCAmelCase : str = 0 if use_pascal else 1
UpperCAmelCase : Optional[Any] = words[start_index:]
UpperCAmelCase : List[str] = [word[0].upper() + word[1:] for word in words_to_capitalize]
UpperCAmelCase : List[Any] = "" if use_pascal else words[0]
return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
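# Example behavior of the converter above (illustrative):
#   "some_random_string" -> "someRandomString"   (use_pascal=False)
#   "some_random_string" -> "SomeRandomString"   (use_pascal=True)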
| 311
|
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : List[str] = inspect.getfile(accelerate.test_utils )
UpperCAmelCase : Tuple = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] )
UpperCAmelCase : Optional[int] = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ["scripts", "test_distributed_data_loop.py"] )
UpperCAmelCase : Tuple = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_ops.py"] )
@require_multi_gpu
def A_ ( self ):
'''simple docstring'''
print(f"Found {torch.cuda.device_count()} devices." )
UpperCAmelCase : Any = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(snake_case , env=os.environ.copy() )
@require_multi_gpu
def A_ ( self ):
'''simple docstring'''
print(f"Found {torch.cuda.device_count()} devices." )
UpperCAmelCase : Tuple = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
print(f"Command: {cmd}" )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(snake_case , env=os.environ.copy() )
@require_multi_gpu
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(snake_case , env=os.environ.copy() )
@require_multi_gpu
def A_ ( self ):
'''simple docstring'''
print(f"Found {torch.cuda.device_count()} devices, using 2 devices only" )
UpperCAmelCase : str = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices="0,1" ):
execute_subprocess_async(snake_case , env=os.environ.copy() )
if __name__ == "__main__":
a : Union[str, Any] = Accelerator()
a : str = (accelerator.state.process_index + 2, 10)
a : List[str] = torch.randint(0, 10, shape).to(accelerator.device)
a : Optional[int] = ""
a : int = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
a : List[Any] = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
a : List[str] = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
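# The checks above encode the pad_across_processes contract: every rank's tensor is padded
# along dim 0 up to the global maximum (num_processes + 1 here), with zeros appended at the
# end by default or prepended when pad_first=True. Single-tensor sketch of that padding
# (illustrative, not the accelerate implementation):
def pad_to_sketch(t, target_len, pad_first=False):
    pad = torch.zeros((target_len - t.shape[0], *t.shape[1:]), dtype=t.dtype)
    return torch.cat([pad, t] if pad_first else [t, pad], dim=0)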
| 311
| 1
|
'''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class UpperCamelCase__ ( lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ""
SCREAMING_SNAKE_CASE__ : Optional[int] = "hf-legacy" # "hf://"" is reserved for hffs
def __init__( self , snake_case = None , snake_case = None , **snake_case , ):
'''simple docstring'''
super().__init__(self , **snake_case )
UpperCAmelCase : str = repo_info
UpperCAmelCase : Optional[int] = token
UpperCAmelCase : Optional[int] = None
def A_ ( self ):
'''simple docstring'''
if self.dir_cache is None:
UpperCAmelCase : Union[str, Any] = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
UpperCAmelCase : Union[str, Any] = {
"name": hf_file.rfilename,
"size": None,
"type": "file",
}
self.dir_cache.update(
{
str(snake_case ): {"name": str(snake_case ), "size": None, "type": "directory"}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def A_ ( self , snake_case , snake_case = "rb" , **snake_case , ):
'''simple docstring'''
if not isinstance(self.repo_info , snake_case ):
raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}" )
UpperCAmelCase : Tuple = hf_hub_url(self.repo_info.id , snake_case , revision=self.repo_info.sha )
return fsspec.open(
snake_case , mode=snake_case , headers=get_authentication_headers_for_url(snake_case , use_auth_token=self.token ) , client_kwargs={"trust_env": True} , ).open()
def A_ ( self , snake_case , **snake_case ):
'''simple docstring'''
self._get_dirs()
UpperCAmelCase : List[str] = self._strip_protocol(snake_case )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(snake_case )
def A_ ( self , snake_case , snake_case=False , **snake_case ):
'''simple docstring'''
self._get_dirs()
UpperCAmelCase : Any = PurePosixPath(path.strip("/" ) )
UpperCAmelCase : List[str] = {}
for p, f in self.dir_cache.items():
UpperCAmelCase : List[str] = PurePosixPath(p.strip("/" ) )
UpperCAmelCase : Optional[Any] = p.parent
if root == path:
UpperCAmelCase : Optional[int] = f
UpperCAmelCase : List[str] = list(paths.values() )
if detail:
return out
else:
return sorted(f["name"] for f in out )
| 311
|
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class UpperCamelCase__ :
"""simple docstring"""
@staticmethod
def A_ ( *snake_case , **snake_case ):
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def A_ ( self , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : str = pipeline(
"zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" )
UpperCAmelCase : Union[str, Any] = [
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
]
return object_detector, examples
def A_ ( self , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : List[Any] = object_detector(examples[0] , threshold=0.0 )
UpperCAmelCase : Dict = len(snake_case )
self.assertGreater(snake_case , 0 )
self.assertEqual(
snake_case , [
{
"score": ANY(snake_case ),
"label": ANY(snake_case ),
"box": {"xmin": ANY(snake_case ), "ymin": ANY(snake_case ), "xmax": ANY(snake_case ), "ymax": ANY(snake_case )},
}
for i in range(snake_case )
] , )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
def A_ ( self ):
'''simple docstring'''
pass
@require_torch
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = pipeline(
"zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" )
UpperCAmelCase : Optional[Any] = object_detector(
"./tests/fixtures/tests_samples/COCO/000000039769.png" , candidate_labels=["cat", "remote", "couch"] , threshold=0.64 , )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
{"score": 0.7235, "label": "cat", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.7218, "label": "remote", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.7184, "label": "couch", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.6748, "label": "remote", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6656, "label": "cat", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6614, "label": "couch", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6456, "label": "remote", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
{"score": 0.642, "label": "remote", "box": {"xmin": 6_7, "ymin": 2_7_4, "xmax": 9_3, "ymax": 2_9_7}},
{"score": 0.6419, "label": "cat", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
] , )
UpperCAmelCase : Tuple = object_detector(
[
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
[
{"score": 0.7235, "label": "cat", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.7218, "label": "remote", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.7184, "label": "couch", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.6748, "label": "remote", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6656, "label": "cat", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6614, "label": "couch", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6456, "label": "remote", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
{"score": 0.642, "label": "remote", "box": {"xmin": 6_7, "ymin": 2_7_4, "xmax": 9_3, "ymax": 2_9_7}},
{"score": 0.6419, "label": "cat", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
]
] , )
@require_torch
@slow
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Tuple = pipeline("zero-shot-object-detection" )
UpperCAmelCase : Optional[int] = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}},
] , )
UpperCAmelCase : Union[str, Any] = object_detector(
[
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
] , )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
[
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}},
],
[
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}},
],
] , )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
def A_ ( self ):
'''simple docstring'''
pass
@require_torch
@slow
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Any = 0.2
UpperCAmelCase : Union[str, Any] = pipeline("zero-shot-object-detection" )
UpperCAmelCase : str = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , threshold=snake_case , )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
] , )
@require_torch
@slow
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Dict = 2
UpperCAmelCase : Optional[Any] = pipeline("zero-shot-object-detection" )
UpperCAmelCase : List[str] = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , top_k=snake_case , )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
] , )
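# Usage sketch of the pipeline exercised above (hedged: the calls mirror
# the test inputs; running them downloads the default checkpoint from the
# Hub, so they are left commented out):
#
#   from transformers import pipeline
#   detector = pipeline("zero-shot-object-detection")
#   detector(
#       "http://images.cocodataset.org/val2017/000000039769.jpg",
#       candidate_labels=["cat", "remote", "couch"],
#       threshold=0.2,  # drop low-confidence boxes
#       top_k=2,        # keep only the two best detections
#   )
#   # -> [{"score": ..., "label": ..., "box": {"xmin": ..., "ymin": ...,
#   #      "xmax": ..., "ymax": ...}}, ...]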
| 311
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : int = logging.get_logger(__name__)
a : Any = {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"
),
"google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json",
"google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json",
"google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json",
"google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class UpperCamelCase__ ( lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = "realm"
def __init__( self , snake_case=3_0_5_2_2 , snake_case=7_6_8 , snake_case=1_2_8 , snake_case=1_2 , snake_case=1_2 , snake_case=8 , snake_case=3_0_7_2 , snake_case="gelu_new" , snake_case=0.1 , snake_case=0.1 , snake_case=5_1_2 , snake_case=2 , snake_case=0.02 , snake_case=1e-12 , snake_case=2_5_6 , snake_case=1_0 , snake_case=1e-3 , snake_case=5 , snake_case=3_2_0 , snake_case=1_3_3_5_3_7_1_8 , snake_case=5_0_0_0 , snake_case=1 , snake_case=0 , snake_case=2 , **snake_case , ):
'''simple docstring'''
super().__init__(pad_token_id=snake_case , bos_token_id=snake_case , eos_token_id=snake_case , **snake_case )
# Common config
UpperCAmelCase : str = vocab_size
UpperCAmelCase : Dict = max_position_embeddings
UpperCAmelCase : Union[str, Any] = hidden_size
UpperCAmelCase : List[Any] = retriever_proj_size
UpperCAmelCase : str = num_hidden_layers
UpperCAmelCase : List[str] = num_attention_heads
UpperCAmelCase : Optional[Any] = num_candidates
UpperCAmelCase : List[Any] = intermediate_size
UpperCAmelCase : Union[str, Any] = hidden_act
UpperCAmelCase : Tuple = hidden_dropout_prob
UpperCAmelCase : Any = attention_probs_dropout_prob
UpperCAmelCase : Dict = initializer_range
UpperCAmelCase : List[Any] = type_vocab_size
UpperCAmelCase : Any = layer_norm_eps
# Reader config
UpperCAmelCase : Any = span_hidden_size
UpperCAmelCase : List[Any] = max_span_width
UpperCAmelCase : Any = reader_layer_norm_eps
UpperCAmelCase : Union[str, Any] = reader_beam_size
UpperCAmelCase : List[Any] = reader_seq_len
# Retrieval config
UpperCAmelCase : Union[str, Any] = num_block_records
UpperCAmelCase : Any = searcher_beam_size
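# Reading guide (hedged, using the upstream parameter names this config
# mirrors): `retriever_proj_size` (default 128) sizes the retrieval
# projection, `num_candidates` (default 8) is how many evidence blocks the
# scorer ranks per query, and `num_block_records` (default 13353718) is the
# size of the pre-computed evidence corpus searched by the open-QA models.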
| 311
|
'''simple docstring'''
def lowercase ( __magic_name__ ):
'''simple docstring'''
if number > 0:
raise ValueError("input must be a negative integer" )
UpperCAmelCase : List[Any] = len(bin(__magic_name__ )[3:] )
UpperCAmelCase : Optional[Any] = bin(abs(__magic_name__ ) - (1 << binary_number_length) )[3:]
UpperCAmelCase : Tuple = (
(
"1"
+ "0" * (binary_number_length - len(__magic_name__ ))
+ twos_complement_number
)
if number < 0
else "0"
)
return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
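# Minimal sketch of the same two's-complement idea via Python's masking
# trick (an illustrative alternative, not the implementation above):
def _twos_complement_demo(number: int, bits: int) -> str:
    # Masking a negative int with (1 << bits) - 1 yields its unsigned
    # two's-complement representation in `bits` bits.
    return "0b" + format(number & ((1 << bits) - 1), f"0{bits}b")

assert _twos_complement_demo(-5, 4) == "0b1011"
assert _twos_complement_demo(-1, 2) == "0b11"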
| 311
| 1
|
'''simple docstring'''
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Union[str, Path]] = None
SCREAMING_SNAKE_CASE__ : bool = False
SCREAMING_SNAKE_CASE__ : bool = False
SCREAMING_SNAKE_CASE__ : bool = False
SCREAMING_SNAKE_CASE__ : Optional[Dict] = None
SCREAMING_SNAKE_CASE__ : Optional[str] = None
SCREAMING_SNAKE_CASE__ : bool = False
SCREAMING_SNAKE_CASE__ : bool = False
SCREAMING_SNAKE_CASE__ : bool = False
SCREAMING_SNAKE_CASE__ : bool = True
SCREAMING_SNAKE_CASE__ : Optional[int] = None
SCREAMING_SNAKE_CASE__ : int = 1
SCREAMING_SNAKE_CASE__ : Optional[Union[str, bool]] = None
SCREAMING_SNAKE_CASE__ : bool = False
SCREAMING_SNAKE_CASE__ : Optional[Dict] = None
SCREAMING_SNAKE_CASE__ : Optional[str] = None
def A_ ( self ):
'''simple docstring'''
return self.__class__(**{k: copy.deepcopy(snake_case ) for k, v in self.__dict__.items()} )
| 311
|
'''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
a : int = datasets.load_iris()
a : Union[str, Any] = np.array(data["data"])
a : Optional[Any] = np.array(data["target"])
a : List[Any] = data["target_names"]
a , a , a , a : Dict = train_test_split(X, y)
def lowercase ( __magic_name__ , __magic_name__ ):
'''simple docstring'''
return np.linalg.norm(np.array(__magic_name__ ) - np.array(__magic_name__ ) )
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=5 ):
'''simple docstring'''
UpperCAmelCase : int = zip(__magic_name__ , __magic_name__ )
# List of distances of all points from the point to be classified
UpperCAmelCase : List[Any] = []
for data_point in data:
UpperCAmelCase : List[str] = euclidean_distance(data_point[0] , __magic_name__ )
distances.append((distance, data_point[1]) )
# Choosing 'k' points with the least distances.
UpperCAmelCase : Union[str, Any] = [i[1] for i in sorted(__magic_name__ )[:k]]
# Most commonly occurring class among them
# is the class into which the point is classified
UpperCAmelCase : List[str] = Counter(__magic_name__ ).most_common(1 )[0][0]
return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
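# Toy sketch of the same distance-then-majority-vote logic on hand-made
# 1-D data (names here are assumptions, independent of the iris arrays):
def _knn_demo():
    points = [((0.0,), "a"), ((0.1,), "a"), ((5.0,), "b"), ((5.1,), "b")]
    query = (0.2,)
    nearest = sorted(points, key=lambda p: abs(p[0][0] - query[0]))[:3]
    return Counter(label for _, label in nearest).most_common(1)[0][0]

assert _knn_demo() == "a"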
| 311
| 1
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
a : List[Any] = logging.get_logger(__name__)
a : Any = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
a : int = {
"vocab_file": {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"
),
"google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt",
"google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt",
"google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt",
"google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt",
},
"tokenizer_file": {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"
),
"google/realm-orqa-nq-openqa": (
"https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"
),
"google/realm-orqa-nq-reader": (
"https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"
),
"google/realm-orqa-wq-openqa": (
"https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"
),
"google/realm-orqa-wq-reader": (
"https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"
),
},
}
a : str = {
"google/realm-cc-news-pretrained-embedder": 5_12,
"google/realm-cc-news-pretrained-encoder": 5_12,
"google/realm-cc-news-pretrained-scorer": 5_12,
"google/realm-cc-news-pretrained-openqa": 5_12,
"google/realm-orqa-nq-openqa": 5_12,
"google/realm-orqa-nq-reader": 5_12,
"google/realm-orqa-wq-openqa": 5_12,
"google/realm-orqa-wq-reader": 5_12,
}
a : int = {
"google/realm-cc-news-pretrained-embedder": {"do_lower_case": True},
"google/realm-cc-news-pretrained-encoder": {"do_lower_case": True},
"google/realm-cc-news-pretrained-scorer": {"do_lower_case": True},
"google/realm-cc-news-pretrained-openqa": {"do_lower_case": True},
"google/realm-orqa-nq-openqa": {"do_lower_case": True},
"google/realm-orqa-nq-reader": {"do_lower_case": True},
"google/realm-orqa-wq-openqa": {"do_lower_case": True},
"google/realm-orqa-wq-reader": {"do_lower_case": True},
}
class UpperCamelCase__ ( lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ : str = PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE__ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ : Any = RealmTokenizer
def __init__( self , snake_case=None , snake_case=None , snake_case=True , snake_case="[UNK]" , snake_case="[SEP]" , snake_case="[PAD]" , snake_case="[CLS]" , snake_case="[MASK]" , snake_case=True , snake_case=None , **snake_case , ):
'''simple docstring'''
super().__init__(
snake_case , tokenizer_file=snake_case , do_lower_case=snake_case , unk_token=snake_case , sep_token=snake_case , pad_token=snake_case , cls_token=snake_case , mask_token=snake_case , tokenize_chinese_chars=snake_case , strip_accents=snake_case , **snake_case , )
UpperCAmelCase : Tuple = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , snake_case ) != do_lower_case
or normalizer_state.get("strip_accents" , snake_case ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , snake_case ) != tokenize_chinese_chars
):
UpperCAmelCase : List[str] = getattr(snake_case , normalizer_state.pop("type" ) )
UpperCAmelCase : Optional[int] = do_lower_case
UpperCAmelCase : List[Any] = strip_accents
UpperCAmelCase : str = tokenize_chinese_chars
UpperCAmelCase : Tuple = normalizer_class(**snake_case )
UpperCAmelCase : str = do_lower_case
def A_ ( self , snake_case , **snake_case ):
'''simple docstring'''
UpperCAmelCase : List[str] = PaddingStrategy.MAX_LENGTH
UpperCAmelCase : int = text
UpperCAmelCase : List[str] = kwargs.pop("text_pair" , snake_case )
UpperCAmelCase : Dict = kwargs.pop("return_tensors" , snake_case )
UpperCAmelCase : str = {
"input_ids": [],
"attention_mask": [],
"token_type_ids": [],
}
for idx, candidate_text in enumerate(snake_case ):
if batch_text_pair is not None:
UpperCAmelCase : Any = batch_text_pair[idx]
else:
UpperCAmelCase : Union[str, Any] = None
UpperCAmelCase : List[str] = super().__call__(snake_case , snake_case , return_tensors=snake_case , **snake_case )
UpperCAmelCase : Dict = encoded_candidates.get("input_ids" )
UpperCAmelCase : Any = encoded_candidates.get("attention_mask" )
UpperCAmelCase : str = encoded_candidates.get("token_type_ids" )
if encoded_input_ids is not None:
output_data["input_ids"].append(snake_case )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(snake_case )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(snake_case )
UpperCAmelCase : int = {key: item for key, item in output_data.items() if len(snake_case ) != 0}
return BatchEncoding(snake_case , tensor_type=snake_case )
def A_ ( self , snake_case , snake_case=None ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def A_ ( self , snake_case , snake_case = None ):
'''simple docstring'''
UpperCAmelCase : List[Any] = [self.sep_token_id]
UpperCAmelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def A_ ( self , snake_case , snake_case = None ):
'''simple docstring'''
UpperCAmelCase : Dict = self._tokenizer.model.save(snake_case , name=snake_case )
return tuple(snake_case )
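# Usage sketch (hedged: `batch_encode_candidates` is the upstream name of
# the overridden __call__ above): each example supplies num_candidates
# texts, padding is forced to MAX_LENGTH, and the stacked tensors come out
# with shape (batch_size, num_candidates, max_length).
#
#   tokenizer.batch_encode_candidates(
#       [["candidate 1", "candidate 2"]], max_length=10, return_tensors="pt"
#   )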
| 311
|
'''simple docstring'''
def lowercase ( __magic_name__ ):
'''simple docstring'''
if number < 0:
raise ValueError("number must not be negative" )
return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
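# Quick sketch of why `n & (n - 1) == 0` works: subtracting 1 flips the
# lowest set bit and everything below it, so the AND is zero only when at
# most one bit was set. Note the edge case: 0 also passes the test.
assert [n for n in range(17) if n & (n - 1) == 0] == [0, 1, 2, 4, 8, 16]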
| 311
| 1
|
'''simple docstring'''
def lowercase ( __magic_name__ = 10**9 ):
'''simple docstring'''
UpperCAmelCase : List[str] = 1
UpperCAmelCase : Union[str, Any] = 2
UpperCAmelCase : Optional[Any] = 0
UpperCAmelCase : List[str] = 0
UpperCAmelCase : Optional[int] = 0
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
UpperCAmelCase : Dict = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
if __name__ == "__main__":
print(F'{solution() = }')
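# Sanity sketch (values checked by hand, not repo tests): the first
# almost-equilateral triangles with integral sides and area are (5, 5, 6)
# and (17, 17, 16), with perimeters 16 and 50, so solution(100) = 66.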
| 311
|
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def lowercase ( __magic_name__ , __magic_name__=10 ):
'''simple docstring'''
UpperCAmelCase : Tuple = []
for _ in range(__magic_name__ ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def lowercase ( __magic_name__ , __magic_name__=10 ):
'''simple docstring'''
UpperCAmelCase : List[str] = []
for step in range(__magic_name__ ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase : Any = os.path.join(__magic_name__ , "schedule.bin" )
torch.save(scheduler.state_dict() , __magic_name__ )
UpperCAmelCase : Any = torch.load(__magic_name__ )
scheduler.load_state_dict(__magic_name__ )
return lrs
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self , snake_case , snake_case , snake_case ):
'''simple docstring'''
self.assertEqual(len(snake_case ) , len(snake_case ) )
for a, b in zip(snake_case , snake_case ):
self.assertAlmostEqual(snake_case , snake_case , delta=snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Dict = torch.tensor([0.1, -0.2, -0.1] , requires_grad=snake_case )
UpperCAmelCase : Any = torch.tensor([0.4, 0.2, -0.5] )
UpperCAmelCase : Any = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
UpperCAmelCase : List[str] = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 )
for _ in range(1_0_0 ):
UpperCAmelCase : List[Any] = criterion(snake_case , snake_case )
loss.backward()
optimizer.step()
            w.grad.detach_() # No zero_grad() function on simple tensors. We do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Tuple = torch.tensor([0.1, -0.2, -0.1] , requires_grad=snake_case )
UpperCAmelCase : int = torch.tensor([0.4, 0.2, -0.5] )
UpperCAmelCase : str = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
UpperCAmelCase : str = Adafactor(
params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=snake_case , weight_decay=0.0 , relative_step=snake_case , scale_parameter=snake_case , warmup_init=snake_case , )
for _ in range(1_0_0_0 ):
UpperCAmelCase : str = criterion(snake_case , snake_case )
loss.backward()
optimizer.step()
            w.grad.detach_() # No zero_grad() function on simple tensors. We do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = nn.Linear(50 , 50 ) if is_torch_available() else None
SCREAMING_SNAKE_CASE__ : List[Any] = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None
SCREAMING_SNAKE_CASE__ : Optional[int] = 10
def A_ ( self , snake_case , snake_case , snake_case , snake_case=None ):
'''simple docstring'''
self.assertEqual(len(snake_case ) , len(snake_case ) )
for a, b in zip(snake_case , snake_case ):
self.assertAlmostEqual(snake_case , snake_case , delta=snake_case , msg=snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : int = {"num_warmup_steps": 2, "num_training_steps": 1_0}
        # schedulers dict format
# function: (sched_args_dict, expected_learning_rates)
UpperCAmelCase : int = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"num_warmup_steps": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, "num_cycles": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, "power": 2.0, "lr_end": 1e-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"num_warmup_steps": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
for scheduler_func, data in scheds.items():
UpperCAmelCase , UpperCAmelCase : Any = data
UpperCAmelCase : Tuple = scheduler_func(self.optimizer , **snake_case )
self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
UpperCAmelCase : List[str] = unwrap_schedule(snake_case , self.num_steps )
self.assertListAlmostEqual(
snake_case , snake_case , tol=1e-2 , msg=f"failed for {scheduler_func} in normal scheduler" , )
UpperCAmelCase : Optional[Any] = scheduler_func(self.optimizer , **snake_case )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(snake_case ) # wrap to test picklability of the schedule
UpperCAmelCase : Tuple = unwrap_and_save_reload_schedule(snake_case , self.num_steps )
self.assertListEqual(snake_case , snake_case , msg=f"failed for {scheduler_func} in save and reload" )
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self , snake_case ):
'''simple docstring'''
UpperCAmelCase : List[str] = fn
def __call__( self , *snake_case , **snake_case ):
'''simple docstring'''
return self.fn(*snake_case , **snake_case )
@classmethod
def A_ ( self , snake_case ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = list(map(self , scheduler.lr_lambdas ) )
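# Why the wrapping above helps (hedged explanation): PyTorch's
# LambdaLR.state_dict() only serializes lr_lambdas that are callable
# objects, not plain functions or lambdas, so mapping each lambda into a
# top-level callable class lets the schedule survive the torch.save /
# torch.load round-trip exercised by the save-and-reload test.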
| 311
| 1
|
'''simple docstring'''
import math
def lowercase ( __magic_name__ ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(__magic_name__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def lowercase ( __magic_name__ = 1_0001 ):
'''simple docstring'''
try:
UpperCAmelCase : Tuple = int(__magic_name__ )
except (TypeError, ValueError):
raise TypeError("Parameter nth must be int or castable to int." ) from None
if nth <= 0:
raise ValueError("Parameter nth must be greater than or equal to one." )
UpperCAmelCase : list[int] = []
UpperCAmelCase : int = 2
while len(__magic_name__ ) < nth:
if is_prime(__magic_name__ ):
primes.append(__magic_name__ )
num += 1
else:
num += 1
return primes[len(__magic_name__ ) - 1]
if __name__ == "__main__":
print(F'{solution() = }')
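# Sanity sketch (well-known values, not repo tests): solution(6) -> 13,
# and the default solution() -> 104743, the 10001st prime (Project Euler 7).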
| 311
|
'''simple docstring'''
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
a : Optional[Any] = logging.get_logger(__name__)
a : Tuple = "T5Config"
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Any = jnp.zeros_like(__magic_name__ )
UpperCAmelCase : Optional[int] = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
UpperCAmelCase : str = shifted_input_ids.at[:, 0].set(__magic_name__ )
UpperCAmelCase : Any = jnp.where(shifted_input_ids == -100 , __magic_name__ , __magic_name__ )
return shifted_input_ids
class UpperCamelCase__ ( lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = "mt5"
SCREAMING_SNAKE_CASE__ : Dict = MTaConfig
class UpperCamelCase__ ( lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = "mt5"
SCREAMING_SNAKE_CASE__ : str = MTaConfig
class UpperCamelCase__ ( lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = "mt5"
SCREAMING_SNAKE_CASE__ : str = MTaConfig
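# Minimal sketch of the label right-shift defined above (assumes
# pad_token_id == 0 and decoder_start_token_id == 0, as for T5; -100 marks
# ignored label positions and is replaced by the pad id):
def _shift_demo():
    ids = jnp.array([[5, 7, -100]])
    shifted = jnp.zeros_like(ids).at[:, 1:].set(ids[:, :-1]).at[:, 0].set(0)
    return jnp.where(shifted == -100, 0, shifted)  # -> [[0 5 7]]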
| 311
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
a : List[Any] = {
"configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
"processing_speech_to_text": ["Speech2TextProcessor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : str = ["Speech2TextTokenizer"]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Optional[int] = ["Speech2TextFeatureExtractor"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Any = [
"TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFSpeech2TextForConditionalGeneration",
"TFSpeech2TextModel",
"TFSpeech2TextPreTrainedModel",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Tuple = [
"SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Speech2TextForConditionalGeneration",
"Speech2TextModel",
"Speech2TextPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
a : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 311
|
'''simple docstring'''
from jiwer import compute_measures
import datasets
a : List[Any] = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
a : str = "\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n"
a : Union[str, Any] = "\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> wer = datasets.load_metric(\"wer\")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase__ ( datasets.Metric ):
"""simple docstring"""
def A_ ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[
"https://en.wikipedia.org/wiki/Word_error_rate",
] , )
def A_ ( self , snake_case=None , snake_case=None , snake_case=False ):
'''simple docstring'''
if concatenate_texts:
return compute_measures(snake_case , snake_case )["wer"]
else:
UpperCAmelCase : Dict = 0
UpperCAmelCase : Optional[Any] = 0
for prediction, reference in zip(snake_case , snake_case ):
UpperCAmelCase : Tuple = compute_measures(snake_case , snake_case )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
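# Usage sketch of the per-pair accumulation above, using the same
# `jiwer.compute_measures` API imported at the top of this file:
#
#   m = compute_measures("this is the reference", "this is the prediction")
#   errors = m["substitutions"] + m["deletions"] + m["insertions"]
#   total = m["substitutions"] + m["deletions"] + m["hits"]
#   errors / total  # -> 0.25: one substitution over four reference words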
| 311
| 1
|
'''simple docstring'''
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
a : Dict = datasets.logging.get_logger(__name__)
a : List[str] = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n"
a : List[Any] = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n"
a : Optional[int] = "\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting 'keep_singletons=False', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n 'mentions': mentions\n 'muc': MUC metric [Vilain et al, 1995]\n 'bcub': B-cubed [Bagga and Baldwin, 1998]\n 'ceafe': CEAFe [Luo et al., 2005]\n 'lea': LEA [Moosavi and Strube, 2016]\n 'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric('coval')\n >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',\n ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',\n ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',\n ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',\n ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',\n ... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n"
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__=False , __magic_name__=False , __magic_name__=True , __magic_name__=False , __magic_name__="dummy_doc" ):
'''simple docstring'''
UpperCAmelCase : Dict = {doc: key_lines}
UpperCAmelCase : Dict = {doc: sys_lines}
UpperCAmelCase : Union[str, Any] = {}
UpperCAmelCase : Optional[int] = 0
UpperCAmelCase : List[Any] = 0
UpperCAmelCase : Optional[int] = 0
UpperCAmelCase : str = 0
UpperCAmelCase : Optional[int] = 0
UpperCAmelCase : List[Any] = 0
UpperCAmelCase , UpperCAmelCase : Tuple = reader.get_doc_mentions(__magic_name__ , key_doc_lines[doc] , __magic_name__ )
key_singletons_num += singletons_num
if NP_only or min_span:
UpperCAmelCase : Optional[Any] = reader.set_annotated_parse_trees(__magic_name__ , key_doc_lines[doc] , __magic_name__ , __magic_name__ )
UpperCAmelCase , UpperCAmelCase : Dict = reader.get_doc_mentions(__magic_name__ , sys_doc_lines[doc] , __magic_name__ )
sys_singletons_num += singletons_num
if NP_only or min_span:
UpperCAmelCase : Any = reader.set_annotated_parse_trees(__magic_name__ , key_doc_lines[doc] , __magic_name__ , __magic_name__ )
if remove_nested:
UpperCAmelCase , UpperCAmelCase : str = reader.remove_nested_coref_mentions(__magic_name__ , __magic_name__ )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
UpperCAmelCase , UpperCAmelCase : List[Any] = reader.remove_nested_coref_mentions(__magic_name__ , __magic_name__ )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
UpperCAmelCase : int = reader.get_mention_assignments(__magic_name__ , __magic_name__ )
UpperCAmelCase : Dict = reader.get_mention_assignments(__magic_name__ , __magic_name__ )
UpperCAmelCase : Optional[Any] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
"Number of removed nested coreferring mentions in the key "
F"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}" )
logger.info(
"Number of resulting singleton clusters in the key "
F"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}" )
if not keep_singletons:
logger.info(
F"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
"files, respectively" )
return doc_coref_infos
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Any = get_coref_infos(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
UpperCAmelCase : Dict = {}
UpperCAmelCase : Optional[Any] = 0
UpperCAmelCase : List[Any] = 0
for name, metric in metrics:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : int = evaluator.evaluate_documents(__magic_name__ , __magic_name__ , beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({F"{name}/recall": recall, F"{name}/precision": precision, F"{name}/f1": fa} )
logger.info(
name.ljust(10 ) , F"Recall: {recall * 100:.2f}" , F" Precision: {precision * 100:.2f}" , F" F1: {fa * 100:.2f}" , )
if conll_subparts_num == 3:
UpperCAmelCase : List[str] = (conll / 3) * 100
logger.info(F"CoNLL score: {conll:.2f}" )
output_scores.update({"conll_score": conll} )
return output_scores
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Any = False
for line in key_lines:
if not line.startswith("#" ):
if len(line.split() ) > 6:
UpperCAmelCase : Any = line.split()[5]
if not parse_col == "-":
UpperCAmelCase : List[Any] = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase__ ( datasets.Metric ):
"""simple docstring"""
def A_ ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" ) ),
"references": datasets.Sequence(datasets.Value("string" ) ),
} ) , codebase_urls=["https://github.com/ns-moosavi/coval"] , reference_urls=[
"https://github.com/ns-moosavi/coval",
"https://www.aclweb.org/anthology/P16-1060",
"http://www.conll.cemantix.org/2012/data.html",
] , )
def A_ ( self , snake_case , snake_case , snake_case=True , snake_case=False , snake_case=False , snake_case=False ):
'''simple docstring'''
UpperCAmelCase : Tuple = [
("mentions", evaluator.mentions),
("muc", evaluator.muc),
("bcub", evaluator.b_cubed),
("ceafe", evaluator.ceafe),
("lea", evaluator.lea),
]
if min_span:
UpperCAmelCase : Dict = util.check_gold_parse_annotation(snake_case )
if not has_gold_parse:
raise NotImplementedError("References should have gold parse annotation to use 'min_span'." )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
UpperCAmelCase : Dict = evaluate(
key_lines=snake_case , sys_lines=snake_case , metrics=snake_case , NP_only=snake_case , remove_nested=snake_case , keep_singletons=snake_case , min_span=snake_case , )
return score
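# Sketch of the CoNLL-score aggregation performed above: it is simply the
# mean of the MUC, B-cubed and CEAFe F1 values, scaled to 0-100 (made-up
# numbers below):
_f1 = {"muc": 0.80, "bcub": 0.70, "ceafe": 0.66}
assert round(sum(_f1.values()) / 3 * 100, 1) == 72.0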
| 311
|
'''simple docstring'''
from functools import lru_cache
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = 2
UpperCAmelCase : str = set()
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.add(__magic_name__ )
if n > 1:
factors.add(__magic_name__ )
return factors
@lru_cache
def lowercase ( __magic_name__ ):
'''simple docstring'''
return len(unique_prime_factors(__magic_name__ ) )
def lowercase ( __magic_name__ ):
'''simple docstring'''
return len(set(__magic_name__ ) ) in (0, 1)
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Dict = 2
while True:
# Increment each value of a generated range
UpperCAmelCase : Any = [base + i for i in range(__magic_name__ )]
        # Run elements through our unique_prime_factors function
# Append our target number to the end.
UpperCAmelCase : Dict = [upf_len(__magic_name__ ) for x in group]
checker.append(__magic_name__ )
# If all numbers in the list are equal, return the group variable.
if equality(__magic_name__ ):
return group
# Increment our base variable by 1
base += 1
def lowercase ( __magic_name__ = 4 ):
'''simple docstring'''
UpperCAmelCase : int = run(__magic_name__ )
return results[0] if len(__magic_name__ ) else None
if __name__ == "__main__":
print(solution())
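# Sanity sketch (checked by hand, not repo tests): 644 = 2^2 * 7 * 23,
# 645 = 3 * 5 * 43 and 646 = 2 * 17 * 19 are the first run of three
# consecutive integers with three distinct prime factors each, so
# run(3) -> [644, 645, 646]; the default solution() answers Project
# Euler 47 with 134043.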
| 311
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
a : Union[str, Any] = {
"configuration_owlvit": [
"OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"OwlViTConfig",
"OwlViTOnnxConfig",
"OwlViTTextConfig",
"OwlViTVisionConfig",
],
"processing_owlvit": ["OwlViTProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Tuple = ["OwlViTFeatureExtractor"]
a : List[Any] = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : int = [
"OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"OwlViTModel",
"OwlViTPreTrainedModel",
"OwlViTTextModel",
"OwlViTVisionModel",
"OwlViTForObjectDetection",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
a : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 311
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
a : Union[str, Any] = {
"configuration_encodec": [
"ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EncodecConfig",
],
"feature_extraction_encodec": ["EncodecFeatureExtractor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Optional[int] = [
"ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
"EncodecModel",
"EncodecPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
a : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 311
| 1
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
a : str = "\nHuman: <<task>>\n\nAssistant: "
a : int = "huggingface-tools/default-prompts"
a : List[str] = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__="run" ):
'''simple docstring'''
if prompt_or_repo_id is None:
UpperCAmelCase : str = DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search("\\s" , __magic_name__ ) is not None:
return prompt_or_repo_id
UpperCAmelCase : List[str] = cached_file(
__magic_name__ , PROMPT_FILES[mode] , repo_type="dataset" , user_agent={"agent": agent_name} )
with open(__magic_name__ , "r" , encoding="utf-8" ) as f:
return f.read()
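# Minimal sketch of the repo-ID heuristic above: any whitespace marks the
# argument as an inline prompt, otherwise it is treated as a Hub repo ID
# whose prompt file is fetched via cached_file.
def _is_repo_id(prompt_or_repo_id: str) -> bool:
    return re.search("\\s", prompt_or_repo_id) is None

assert _is_repo_id("huggingface-tools/default-prompts")
assert not _is_repo_id("Human: <<task>>\n\nAssistant: ")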
| 311
|
'''simple docstring'''
# Lint as: python3
import itertools
import os
import re
a : Tuple = re.compile(R"([A-Z]+)([A-Z][a-z])")
a : Union[str, Any] = re.compile(R"([a-z\d])([A-Z])")
a : str = re.compile(R"(?<!_)_(?!_)")
a : List[Any] = re.compile(R"(_{2,})")
a : List[Any] = R"^\w+(\.\w+)*$"
a : Dict = R"<>:/\|?*"
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Dict = _uppercase_uppercase_re.sub(R"\1_\2" , __magic_name__ )
UpperCAmelCase : List[str] = _lowercase_uppercase_re.sub(R"\1_\2" , __magic_name__ )
return name.lower()
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Any = _single_underscore_re.split(__magic_name__ )
UpperCAmelCase : Union[str, Any] = [_multiple_underscores_re.split(__magic_name__ ) for n in name]
return "".join(n.capitalize() for n in itertools.chain.from_iterable(__magic_name__ ) if n != "" )
def lowercase ( __magic_name__ ):
'''simple docstring'''
if os.path.basename(__magic_name__ ) != name:
raise ValueError(F"Should be a dataset name, not a path: {name}" )
return camelcase_to_snakecase(__magic_name__ )
def lowercase ( __magic_name__ , __magic_name__ ):
'''simple docstring'''
if os.path.basename(__magic_name__ ) != name:
raise ValueError(F"Should be a dataset name, not a path: {name}" )
if not re.match(_split_re , __magic_name__ ):
raise ValueError(F"Split name should match '{_split_re}'' but got '{split}'." )
return F"{filename_prefix_for_name(__magic_name__ )}-{split}"
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None ):
'''simple docstring'''
UpperCAmelCase : List[str] = filename_prefix_for_split(__magic_name__ , __magic_name__ )
if filetype_suffix:
prefix += F".{filetype_suffix}"
UpperCAmelCase : int = os.path.join(__magic_name__ , __magic_name__ )
return F"{filepath}*"
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None , __magic_name__=None ):
'''simple docstring'''
UpperCAmelCase : List[str] = filename_prefix_for_split(__magic_name__ , __magic_name__ )
UpperCAmelCase : int = os.path.join(__magic_name__ , __magic_name__ )
if shard_lengths:
UpperCAmelCase : Tuple = len(__magic_name__ )
UpperCAmelCase : Optional[int] = [F"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(__magic_name__ )]
if filetype_suffix:
UpperCAmelCase : Optional[int] = [filename + F".{filetype_suffix}" for filename in filenames]
return filenames
else:
UpperCAmelCase : int = prefix
if filetype_suffix:
filename += F".{filetype_suffix}"
return [filename]
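# Sanity sketch of the converters above (values checked by hand, using
# standalone regexes equivalent to the module-level ones):
def _camel_to_snake_demo(name: str) -> str:
    name = re.sub(R"([A-Z]+)([A-Z][a-z])", R"\1_\2", name)
    name = re.sub(R"([a-z\d])([A-Z])", R"\1_\2", name)
    return name.lower()

assert _camel_to_snake_demo("SquadV2") == "squad_v2"
assert _camel_to_snake_demo("HTTPRequest") == "http_request"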
| 311
| 1
|
'''simple docstring'''
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
a : Optional[int] = 16
a : str = 32
def lowercase ( __magic_name__ ):
'''simple docstring'''
return int(x / 2**20 )
class UpperCamelCase__ :
"""simple docstring"""
def __enter__( self ):
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
UpperCAmelCase : List[str] = torch.cuda.memory_allocated()
return self
def __exit__( self , *snake_case ):
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
UpperCAmelCase : int = torch.cuda.memory_allocated()
UpperCAmelCase : List[Any] = torch.cuda.max_memory_allocated()
UpperCAmelCase : Optional[int] = bamb(self.end - self.begin )
UpperCAmelCase : Dict = bamb(self.peak - self.begin )
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def lowercase ( __magic_name__ , __magic_name__ = 16 , __magic_name__ = "bert-base-cased" , __magic_name__ = 320 , __magic_name__ = 160 , ):
'''simple docstring'''
UpperCAmelCase : List[Any] = AutoTokenizer.from_pretrained(__magic_name__ )
UpperCAmelCase : List[str] = load_dataset(
"glue" , "mrpc" , split={"train": F"train[:{n_train}]", "validation": F"validation[:{n_val}]"} )
def tokenize_function(__magic_name__ ):
# max_length=None => use the model max length (it's actually the default)
UpperCAmelCase : Dict = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=__magic_name__ , max_length=__magic_name__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
UpperCAmelCase : List[str] = datasets.map(
__magic_name__ , batched=__magic_name__ , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=__magic_name__ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCAmelCase : Optional[int] = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(__magic_name__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__magic_name__ , padding="max_length" , max_length=128 , return_tensors="pt" )
return tokenizer.pad(__magic_name__ , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
UpperCAmelCase : int = DataLoader(
tokenized_datasets["train"] , shuffle=__magic_name__ , collate_fn=__magic_name__ , batch_size=__magic_name__ )
UpperCAmelCase : List[Any] = DataLoader(
tokenized_datasets["validation"] , shuffle=__magic_name__ , collate_fn=__magic_name__ , batch_size=__magic_name__ )
return train_dataloader, eval_dataloader
def lowercase ( __magic_name__ , __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase : Union[str, Any] = config["lr"]
UpperCAmelCase : str = int(config["num_epochs"] )
UpperCAmelCase : str = int(config["seed"] )
UpperCAmelCase : Any = int(config["batch_size"] )
UpperCAmelCase : Optional[int] = args.model_name_or_path
set_seed(__magic_name__ )
UpperCAmelCase , UpperCAmelCase : str = get_dataloaders(__magic_name__ , __magic_name__ , __magic_name__ , args.n_train , args.n_val )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCAmelCase : List[Any] = AutoModelForSequenceClassification.from_pretrained(__magic_name__ , return_dict=__magic_name__ )
# Instantiate optimizer
UpperCAmelCase : Optional[int] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
UpperCAmelCase : List[Any] = optimizer_cls(params=model.parameters() , lr=__magic_name__ )
if accelerator.state.deepspeed_plugin is not None:
UpperCAmelCase : List[str] = accelerator.state.deepspeed_plugin.deepspeed_config[
"gradient_accumulation_steps"
]
else:
UpperCAmelCase : Tuple = 1
UpperCAmelCase : Union[str, Any] = (len(__magic_name__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
UpperCAmelCase : List[Any] = get_linear_schedule_with_warmup(
optimizer=__magic_name__ , num_warmup_steps=0 , num_training_steps=__magic_name__ , )
else:
UpperCAmelCase : Any = DummyScheduler(__magic_name__ , total_num_steps=__magic_name__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : str = accelerator.prepare(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
# We need to keep track of how many total steps we have iterated over
UpperCAmelCase : List[str] = 0
    # We also need to keep track of the starting epoch so files are named properly
UpperCAmelCase : List[Any] = 0
# Now we train the model
UpperCAmelCase : Optional[Any] = {}
for epoch in range(__magic_name__ , __magic_name__ ):
with TorchTracemalloc() as tracemalloc:
model.train()
for step, batch in enumerate(__magic_name__ ):
UpperCAmelCase : Tuple = model(**__magic_name__ )
UpperCAmelCase : Optional[Any] = outputs.loss
UpperCAmelCase : Optional[int] = loss / gradient_accumulation_steps
accelerator.backward(__magic_name__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print("Memory before entering the train : {}".format(bamb(tracemalloc.begin ) ) )
accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used ) )
accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked ) )
accelerator.print(
"Total Peak Memory consumed during the train (max): {}".format(
tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
UpperCAmelCase : Any = tracemalloc.peaked + bamb(tracemalloc.begin )
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[F"epoch-{epoch}"] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , "peak_memory_utilization.json" ) , "w" ) as f:
json.dump(__magic_name__ , __magic_name__ )
def lowercase ( ):
'''simple docstring'''
UpperCAmelCase : Dict = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." )
parser.add_argument(
"--model_name_or_path" , type=__magic_name__ , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=__magic_name__ , )
parser.add_argument(
"--output_dir" , type=__magic_name__ , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--peak_memory_upper_bound" , type=__magic_name__ , default=__magic_name__ , help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value." , )
parser.add_argument(
"--n_train" , type=__magic_name__ , default=320 , help="Number of training examples to use." , )
parser.add_argument(
"--n_val" , type=__magic_name__ , default=160 , help="Number of validation examples to use." , )
parser.add_argument(
"--num_epochs" , type=__magic_name__ , default=1 , help="Number of train epochs." , )
UpperCAmelCase : List[Any] = parser.parse_args()
UpperCAmelCase : Dict = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
training_function(__magic_name__ , __magic_name__ )
if __name__ == "__main__":
main()
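# Example invocation (the script name is a placeholder; flags per the parser above):
#   accelerate launch peak_memory_tracking.py --model_name_or_path bert-base-cased \
#       --output_dir . --n_train 320 --n_val 160 --num_epochs 1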
'''simple docstring'''
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
a : Optional[int] = _symbol_database.Default()
a : Any = _descriptor_pool.Default().AddSerializedFile(
B"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"
)
a : Tuple = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
a : str = None
a : Optional[Any] = B"H\003"
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
a : str = 45
a : Any = 15_81
a : List[Any] = 15_17
a : Union[str, Any] = 15_70
a : Optional[Any] = 15_84
a : List[str] = 17_93
a : Optional[Any] = 17_95
a : Tuple = 19_16
a : Optional[Any] = 18_64
a : int = 19_05
a : Optional[Any] = 19_19
a : Union[str, Any] = 24_29
a : List[Any] = 22_08
a : Dict = 24_18
a : Optional[int] = 23_23
a : str = 24_07
# @@protoc_insertion_point(module_scope)
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self , snake_case , snake_case=1_3 , snake_case=7 , snake_case=True , snake_case=True , snake_case=False , snake_case=True , snake_case=9_9 , snake_case=3_2 , snake_case=5 , snake_case=4 , snake_case=3_7 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=5_1_2 , snake_case=1_6 , snake_case=2 , snake_case=0.02 , snake_case=3 , snake_case=4 , snake_case=None , ):
'''simple docstring'''
UpperCAmelCase : List[str] = parent
UpperCAmelCase : Dict = batch_size
UpperCAmelCase : Any = seq_length
UpperCAmelCase : Tuple = is_training
UpperCAmelCase : int = use_input_mask
UpperCAmelCase : str = use_token_type_ids
UpperCAmelCase : str = use_labels
UpperCAmelCase : Any = vocab_size
UpperCAmelCase : Union[str, Any] = hidden_size
UpperCAmelCase : List[str] = num_hidden_layers
UpperCAmelCase : Optional[int] = num_attention_heads
UpperCAmelCase : List[str] = intermediate_size
UpperCAmelCase : Tuple = hidden_act
UpperCAmelCase : Optional[Any] = hidden_dropout_prob
UpperCAmelCase : Tuple = attention_probs_dropout_prob
UpperCAmelCase : Any = max_position_embeddings
UpperCAmelCase : Union[str, Any] = type_vocab_size
UpperCAmelCase : Any = type_sequence_label_size
UpperCAmelCase : List[str] = initializer_range
UpperCAmelCase : int = num_labels
UpperCAmelCase : int = num_choices
UpperCAmelCase : Any = scope
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase : str = None
if self.use_input_mask:
UpperCAmelCase : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase : Tuple = None
if self.use_token_type_ids:
UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase : str = None
UpperCAmelCase : Union[str, Any] = None
UpperCAmelCase : List[str] = None
if self.use_labels:
UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase : Optional[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A_ ( self ):
'''simple docstring'''
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case , initializer_range=self.initializer_range , use_stable_embedding=snake_case , )
def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = OpenLlamaModel(config=snake_case )
model.to(snake_case )
model.eval()
UpperCAmelCase : Union[str, Any] = model(snake_case , attention_mask=snake_case )
UpperCAmelCase : List[Any] = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = True
UpperCAmelCase : List[str] = OpenLlamaModel(snake_case )
model.to(snake_case )
model.eval()
UpperCAmelCase : Union[str, Any] = model(
snake_case , attention_mask=snake_case , encoder_hidden_states=snake_case , encoder_attention_mask=snake_case , )
UpperCAmelCase : str = model(
snake_case , attention_mask=snake_case , encoder_hidden_states=snake_case , )
UpperCAmelCase : Optional[Any] = model(snake_case , attention_mask=snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = OpenLlamaForCausalLM(config=snake_case )
model.to(snake_case )
model.eval()
UpperCAmelCase : List[Any] = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
'''simple docstring'''
UpperCAmelCase : List[str] = True
UpperCAmelCase : Any = True
UpperCAmelCase : Optional[int] = OpenLlamaForCausalLM(config=snake_case )
model.to(snake_case )
model.eval()
# first forward pass
UpperCAmelCase : str = model(
snake_case , attention_mask=snake_case , encoder_hidden_states=snake_case , encoder_attention_mask=snake_case , use_cache=snake_case , )
UpperCAmelCase : Any = outputs.past_key_values
        # create hypothetical multiple next tokens and extend them to next_input_ids
UpperCAmelCase : Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase : str = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append the new tokens to input_ids and the attention mask
UpperCAmelCase : Optional[int] = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCAmelCase : List[str] = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCAmelCase : List[Any] = model(
snake_case , attention_mask=snake_case , encoder_hidden_states=snake_case , encoder_attention_mask=snake_case , output_hidden_states=snake_case , )["hidden_states"][0]
UpperCAmelCase : int = model(
snake_case , attention_mask=snake_case , encoder_hidden_states=snake_case , encoder_attention_mask=snake_case , past_key_values=snake_case , output_hidden_states=snake_case , )["hidden_states"][0]
# select random slice
UpperCAmelCase : Any = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCAmelCase : str = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCAmelCase : Dict = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case , snake_case , atol=1e-3 ) )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs()
(
(
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) ,
) : Tuple = config_and_inputs
UpperCAmelCase : List[str] = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase__ ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (OpenLlamaForCausalLM,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE__ : str = (
{
"feature-extraction": OpenLlamaModel,
"text-classification": OpenLlamaForSequenceClassification,
"text-generation": OpenLlamaForCausalLM,
"zero-shot": OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ : Any = False
SCREAMING_SNAKE_CASE__ : Optional[int] = False
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : str = OpenLlamaModelTester(self )
UpperCAmelCase : Dict = ConfigTester(self , config_class=snake_case , hidden_size=3_7 )
def A_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase : str = type
self.model_tester.create_and_check_model(*snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Optional[int] = 3
UpperCAmelCase : Union[str, Any] = input_dict["input_ids"]
UpperCAmelCase : Optional[Any] = input_ids.ne(1 ).to(snake_case )
UpperCAmelCase : List[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCAmelCase : List[str] = OpenLlamaForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
UpperCAmelCase : str = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Any = 3
UpperCAmelCase : Tuple = "single_label_classification"
UpperCAmelCase : Any = input_dict["input_ids"]
UpperCAmelCase : Union[str, Any] = input_ids.ne(1 ).to(snake_case )
UpperCAmelCase : List[str] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCAmelCase : str = OpenLlamaForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
UpperCAmelCase : List[Any] = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Dict = 3
UpperCAmelCase : Dict = "multi_label_classification"
UpperCAmelCase : Optional[Any] = input_dict["input_ids"]
UpperCAmelCase : Tuple = input_ids.ne(1 ).to(snake_case )
UpperCAmelCase : Any = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
UpperCAmelCase : Any = OpenLlamaForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
UpperCAmelCase : str = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("Open-Llama buffers include complex numbers, which breaks this test" )
def A_ ( self ):
'''simple docstring'''
pass
@parameterized.expand([("linear",), ("dynamic",)] )
def A_ ( self , snake_case ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Any = ids_tensor([1, 1_0] , config.vocab_size )
UpperCAmelCase : List[Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase : List[Any] = OpenLlamaModel(snake_case )
original_model.to(snake_case )
original_model.eval()
UpperCAmelCase : int = original_model(snake_case ).last_hidden_state
UpperCAmelCase : Union[str, Any] = original_model(snake_case ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase : Dict = {"type": scaling_type, "factor": 10.0}
UpperCAmelCase : List[str] = OpenLlamaModel(snake_case )
scaled_model.to(snake_case )
scaled_model.eval()
UpperCAmelCase : Optional[int] = scaled_model(snake_case ).last_hidden_state
UpperCAmelCase : Union[str, Any] = scaled_model(snake_case ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(snake_case , snake_case , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(snake_case , snake_case , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(snake_case , snake_case , atol=1e-5 ) )
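# Note on the scaling test above: "linear" RoPE scaling rescales positions
# immediately, so even short inputs diverge from the unscaled model, while
# "dynamic" scaling only activates past the original max_position_embeddings,
# which is why the short-input outputs are asserted to match in that branch.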
'''simple docstring'''
import argparse
import copy
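# The input file is expected to be a whitespace-separated edge list, one edge
# per line (format inferred from the split()[0..2] parsing below):
#   <node_a> <node_b> <distance>
# e.g. "a b 20" declares an edge of length 20 between nodes "a" and "b".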
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : List[str] = {}
with open(__magic_name__ ) as f:
for line in f:
if line.split()[0] not in dict_of_neighbours:
UpperCAmelCase : List[Any] = []
_list.append([line.split()[1], line.split()[2]] )
UpperCAmelCase : Tuple = _list
else:
dict_of_neighbours[line.split()[0]].append(
[line.split()[1], line.split()[2]] )
if line.split()[1] not in dict_of_neighbours:
UpperCAmelCase : Any = []
_list.append([line.split()[0], line.split()[2]] )
UpperCAmelCase : int = _list
else:
dict_of_neighbours[line.split()[1]].append(
[line.split()[0], line.split()[2]] )
return dict_of_neighbours
def lowercase ( __magic_name__ , __magic_name__ ):
'''simple docstring'''
with open(__magic_name__ ) as f:
UpperCAmelCase : List[str] = f.read(1 )
UpperCAmelCase : List[Any] = start_node
UpperCAmelCase : Union[str, Any] = []
UpperCAmelCase : Any = start_node
UpperCAmelCase : Optional[Any] = 0
while visiting not in first_solution:
UpperCAmelCase : Optional[Any] = 1_0000
for k in dict_of_neighbours[visiting]:
if int(k[1] ) < int(__magic_name__ ) and k[0] not in first_solution:
UpperCAmelCase : Tuple = k[1]
UpperCAmelCase : Dict = k[0]
first_solution.append(__magic_name__ )
UpperCAmelCase : int = distance_of_first_solution + int(__magic_name__ )
UpperCAmelCase : str = best_node
first_solution.append(__magic_name__ )
UpperCAmelCase : int = 0
for k in dict_of_neighbours[first_solution[-2]]:
if k[0] == start_node:
break
position += 1
UpperCAmelCase : str = (
distance_of_first_solution
+ int(dict_of_neighbours[first_solution[-2]][position][1] )
- 1_0000
)
return first_solution, distance_of_first_solution
def lowercase ( __magic_name__ , __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = []
for n in solution[1:-1]:
UpperCAmelCase : Any = solution.index(__magic_name__ )
for kn in solution[1:-1]:
UpperCAmelCase : Dict = solution.index(__magic_name__ )
if n == kn:
continue
UpperCAmelCase : Tuple = copy.deepcopy(__magic_name__ )
UpperCAmelCase : Optional[int] = kn
UpperCAmelCase : List[str] = n
UpperCAmelCase : str = 0
for k in _tmp[:-1]:
UpperCAmelCase : List[Any] = _tmp[_tmp.index(__magic_name__ ) + 1]
for i in dict_of_neighbours[k]:
if i[0] == next_node:
UpperCAmelCase : List[Any] = distance + int(i[1] )
_tmp.append(__magic_name__ )
if _tmp not in neighborhood_of_solution:
neighborhood_of_solution.append(_tmp )
UpperCAmelCase : List[str] = len(neighborhood_of_solution[0] ) - 1
neighborhood_of_solution.sort(key=lambda __magic_name__ : x[index_of_last_item_in_the_list] )
return neighborhood_of_solution
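# Each neighbour built above is the current tour with two interior nodes
# swapped; its total distance is appended as the final list element, and the
# neighbourhood is sorted by that distance so index 0 is the cheapest swap.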
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : List[Any] = 1
UpperCAmelCase : List[str] = first_solution
UpperCAmelCase : str = []
UpperCAmelCase : Union[str, Any] = distance_of_first_solution
UpperCAmelCase : Union[str, Any] = solution
while count <= iters:
UpperCAmelCase : int = find_neighborhood(__magic_name__ , __magic_name__ )
UpperCAmelCase : Any = 0
UpperCAmelCase : List[str] = neighborhood[index_of_best_solution]
UpperCAmelCase : Dict = len(__magic_name__ ) - 1
UpperCAmelCase : Dict = False
while not found:
UpperCAmelCase : List[Any] = 0
while i < len(__magic_name__ ):
if best_solution[i] != solution[i]:
UpperCAmelCase : int = best_solution[i]
UpperCAmelCase : Optional[int] = solution[i]
break
UpperCAmelCase : List[str] = i + 1
if [first_exchange_node, second_exchange_node] not in tabu_list and [
second_exchange_node,
first_exchange_node,
] not in tabu_list:
tabu_list.append([first_exchange_node, second_exchange_node] )
UpperCAmelCase : List[str] = True
UpperCAmelCase : List[Any] = best_solution[:-1]
UpperCAmelCase : str = neighborhood[index_of_best_solution][best_cost_index]
if cost < best_cost:
UpperCAmelCase : Union[str, Any] = cost
UpperCAmelCase : Tuple = solution
else:
UpperCAmelCase : Optional[Any] = index_of_best_solution + 1
UpperCAmelCase : str = neighborhood[index_of_best_solution]
if len(__magic_name__ ) >= size:
tabu_list.pop(0 )
UpperCAmelCase : int = count + 1
return best_solution_ever, best_cost
def lowercase ( __magic_name__=None ):
'''simple docstring'''
UpperCAmelCase : Dict = generate_neighbours(args.File )
UpperCAmelCase , UpperCAmelCase : Any = generate_first_solution(
args.File , __magic_name__ )
UpperCAmelCase , UpperCAmelCase : Any = tabu_search(
__magic_name__ , __magic_name__ , __magic_name__ , args.Iterations , args.Size , )
print(F"Best solution: {best_sol}, with total distance: {best_cost}." )
if __name__ == "__main__":
a : Union[str, Any] = argparse.ArgumentParser(description="Tabu Search")
parser.add_argument(
"-f",
"--File",
type=str,
help="Path to the file containing the data",
required=True,
)
parser.add_argument(
"-i",
"--Iterations",
type=int,
help="How many iterations the algorithm should perform",
required=True,
)
parser.add_argument(
"-s", "--Size", type=int, help="Size of the tabu list", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
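# Example invocation (the data file name is a placeholder):
#   python tabu_search.py -f edges.txt -i 4 -s 3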
'''simple docstring'''
from functools import lru_cache
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = 2
UpperCAmelCase : str = set()
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.add(__magic_name__ )
if n > 1:
factors.add(__magic_name__ )
return factors
@lru_cache
def lowercase ( __magic_name__ ):
'''simple docstring'''
return len(unique_prime_factors(__magic_name__ ) )
def lowercase ( __magic_name__ ):
'''simple docstring'''
return len(set(__magic_name__ ) ) in (0, 1)
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Dict = 2
while True:
        # Build a window of n consecutive integers starting at base
UpperCAmelCase : Any = [base + i for i in range(__magic_name__ )]
        # Run elements through our unique_prime_factors function
# Append our target number to the end.
UpperCAmelCase : Dict = [upf_len(__magic_name__ ) for x in group]
checker.append(__magic_name__ )
# If all numbers in the list are equal, return the group variable.
if equality(__magic_name__ ):
return group
# Increment our base variable by 1
base += 1
def lowercase ( __magic_name__ = 4 ):
'''simple docstring'''
UpperCAmelCase : int = run(__magic_name__ )
return results[0] if len(__magic_name__ ) else None
if __name__ == "__main__":
print(solution())
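# Worked check (Project Euler 47 style, assuming the obfuscated append adds the
# window size n, as the "target number" comment above suggests, so each member
# needs exactly n distinct prime factors):
#   644 = 2^2 * 7 * 23, 645 = 3 * 5 * 43, 646 = 2 * 17 * 19
#   -> run(3) returns [644, 645, 646]; with the default n=4, solution() should
#      print 134043 (the Project Euler 47 answer).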
'''simple docstring'''
from collections.abc import Generator
from math import sin
def lowercase ( __magic_name__ ):
'''simple docstring'''
if len(__magic_name__ ) != 32:
raise ValueError("Input must be of length 32" )
UpperCAmelCase : Union[str, Any] = b""
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def lowercase ( __magic_name__ ):
'''simple docstring'''
if i < 0:
raise ValueError("Input must be non-negative" )
UpperCAmelCase : Dict = format(__magic_name__ , "08x" )[-8:]
UpperCAmelCase : List[str] = b""
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8" )
return little_endian_hex
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : int = b""
for char in message:
bit_string += format(__magic_name__ , "08b" ).encode("utf-8" )
UpperCAmelCase : List[Any] = format(len(__magic_name__ ) , "064b" ).encode("utf-8" )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(__magic_name__ ) % 512 != 448:
bit_string += b"0"
bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
return bit_string
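# Padding layout produced above (standard MD5 preprocessing): the message bits,
# a single "1" bit, then "0" bits until the length is 448 (mod 512), followed
# by the original bit length as a 64-bit little-endian value.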
def lowercase ( __magic_name__ ):
'''simple docstring'''
if len(__magic_name__ ) % 512 != 0:
raise ValueError("Input must have length that's a multiple of 512" )
for pos in range(0 , len(__magic_name__ ) , 512 ):
UpperCAmelCase : Union[str, Any] = bit_string[pos : pos + 512]
UpperCAmelCase : Tuple = []
for i in range(0 , 512 , 32 ):
block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
yield block_words
def lowercase ( __magic_name__ ):
'''simple docstring'''
if i < 0:
raise ValueError("Input must be non-negative" )
UpperCAmelCase : Any = format(__magic_name__ , "032b" )
UpperCAmelCase : int = ""
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(__magic_name__ , 2 )
def lowercase ( __magic_name__ , __magic_name__ ):
'''simple docstring'''
return (a + b) % 2**32
def lowercase ( __magic_name__ , __magic_name__ ):
'''simple docstring'''
if i < 0:
raise ValueError("Input must be non-negative" )
if shift < 0:
raise ValueError("Shift must be non-negative" )
return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Dict = preprocess(__magic_name__ )
UpperCAmelCase : List[Any] = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
# Starting states
UpperCAmelCase : List[str] = 0X67452301
UpperCAmelCase : Tuple = 0XEFCDAB89
UpperCAmelCase : List[Any] = 0X98BADCFE
UpperCAmelCase : List[str] = 0X10325476
UpperCAmelCase : Dict = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(__magic_name__ ):
UpperCAmelCase : Optional[Any] = aa
UpperCAmelCase : List[Any] = ba
UpperCAmelCase : Optional[Any] = ca
UpperCAmelCase : Any = da
# Hash current chunk
for i in range(64 ):
if i <= 15:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
UpperCAmelCase : Tuple = d ^ (b & (c ^ d))
UpperCAmelCase : List[str] = i
elif i <= 31:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
UpperCAmelCase : int = c ^ (d & (b ^ c))
UpperCAmelCase : Tuple = (5 * i + 1) % 16
elif i <= 47:
UpperCAmelCase : Any = b ^ c ^ d
UpperCAmelCase : Union[str, Any] = (3 * i + 5) % 16
else:
UpperCAmelCase : Dict = c ^ (b | not_aa(__magic_name__ ))
UpperCAmelCase : Dict = (7 * i) % 16
UpperCAmelCase : List[str] = (f + a + added_consts[i] + block_words[g]) % 2**32
UpperCAmelCase : List[Any] = d
UpperCAmelCase : Any = c
UpperCAmelCase : Dict = b
UpperCAmelCase : Union[str, Any] = sum_aa(__magic_name__ , left_rotate_aa(__magic_name__ , shift_amounts[i] ) )
# Add hashed chunk to running total
UpperCAmelCase : List[str] = sum_aa(__magic_name__ , __magic_name__ )
UpperCAmelCase : Any = sum_aa(__magic_name__ , __magic_name__ )
UpperCAmelCase : List[Any] = sum_aa(__magic_name__ , __magic_name__ )
UpperCAmelCase : Optional[int] = sum_aa(__magic_name__ , __magic_name__ )
UpperCAmelCase : List[str] = reformat_hex(__magic_name__ ) + reformat_hex(__magic_name__ ) + reformat_hex(__magic_name__ ) + reformat_hex(__magic_name__ )
return digest
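# Known-answer vectors from RFC 1321, assuming the function above implements
# standard MD5 end to end:
#   b""    -> "d41d8cd98f00b204e9800998ecf8427e"
#   b"abc" -> "900150983cd24fb0d6963f7d28e17f72"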
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class UpperCamelCase__ ( lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = "dandelin/vilt-b32-finetuned-vqa"
SCREAMING_SNAKE_CASE__ : Tuple = (
"This is a tool that answers a question about an image. It takes an input named `image` which should be the "
"image containing the information, as well as a `question` which should be the question in English. It "
"returns a text that is the answer to the question."
)
SCREAMING_SNAKE_CASE__ : Optional[Any] = "image_qa"
SCREAMING_SNAKE_CASE__ : Optional[Any] = AutoProcessor
SCREAMING_SNAKE_CASE__ : Dict = AutoModelForVisualQuestionAnswering
SCREAMING_SNAKE_CASE__ : Optional[Any] = ["image", "text"]
SCREAMING_SNAKE_CASE__ : Tuple = ["text"]
def __init__( self , *snake_case , **snake_case ):
'''simple docstring'''
requires_backends(self , ["vision"] )
super().__init__(*snake_case , **snake_case )
def A_ ( self , snake_case , snake_case ):
'''simple docstring'''
return self.pre_processor(snake_case , snake_case , return_tensors="pt" )
def A_ ( self , snake_case ):
'''simple docstring'''
with torch.no_grad():
return self.model(**snake_case ).logits
def A_ ( self , snake_case ):
'''simple docstring'''
UpperCAmelCase : List[Any] = outputs.argmax(-1 ).item()
return self.model.config.idalabel[idx]
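# Usage sketch (the class name is this file's obfuscated placeholder and
# "photo.jpg" is an illustrative path; PipelineTool instances are callable):
#
#   from PIL import Image
#   tool = UpperCamelCase__()
#   print(tool(image=Image.open("photo.jpg"), question="What is shown here?"))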
'''simple docstring'''
a : List[str] = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
a : Optional[int] = logging.get_logger(__name__)
a : str = {"vocab_file": "sentencepiece.bpe.model"}
a : str = {
"vocab_file": {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
}
}
a : List[Any] = {
"camembert-base": 5_12,
}
a : Dict = "▁"
class UpperCamelCase__ ( lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ : List[str] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ : List[str] = ["input_ids", "attention_mask"]
def __init__( self , snake_case , snake_case="<s>" , snake_case="</s>" , snake_case="</s>" , snake_case="<s>" , snake_case="<unk>" , snake_case="<pad>" , snake_case="<mask>" , snake_case=["<s>NOTUSED", "</s>NOTUSED"] , snake_case = None , **snake_case , ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else mask_token
UpperCAmelCase : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=snake_case , eos_token=snake_case , unk_token=snake_case , sep_token=snake_case , cls_token=snake_case , pad_token=snake_case , mask_token=snake_case , additional_special_tokens=snake_case , sp_model_kwargs=self.sp_model_kwargs , **snake_case , )
UpperCAmelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(snake_case ) )
UpperCAmelCase : int = vocab_file
# HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>).
UpperCAmelCase : List[str] = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
UpperCAmelCase : Optional[Any] = len(self.fairseq_tokens_to_ids )
UpperCAmelCase : Union[str, Any] = len(self.sp_model ) + len(self.fairseq_tokens_to_ids )
UpperCAmelCase : Union[str, Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
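        # Net effect of the two maps above: ids 0-3 are reserved fairseq
        # specials, every sentencepiece id is shifted up by the fairseq offset
        # when encoding, and shifted back down when decoding.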
def A_ ( self , snake_case , snake_case = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase : Any = [self.cls_token_id]
UpperCAmelCase : List[str] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def A_ ( self , snake_case , snake_case = None , snake_case = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case , token_ids_a=snake_case , already_has_special_tokens=snake_case )
if token_ids_a is None:
return [1] + ([0] * len(snake_case )) + [1]
return [1] + ([0] * len(snake_case )) + [1, 1] + ([0] * len(snake_case )) + [1]
def A_ ( self , snake_case , snake_case = None ):
'''simple docstring'''
UpperCAmelCase : Dict = [self.sep_token_id]
UpperCAmelCase : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def A_ ( self ):
'''simple docstring'''
return len(self.fairseq_tokens_to_ids ) + len(self.sp_model )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = {self.convert_ids_to_tokens(snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def A_ ( self , snake_case ):
'''simple docstring'''
return self.sp_model.encode(snake_case , out_type=snake_case )
def A_ ( self , snake_case ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
elif self.sp_model.PieceToId(snake_case ) == 0:
# Convert sentence piece unk token to fairseq unk token index
return self.unk_token_id
return self.fairseq_offset + self.sp_model.PieceToId(snake_case )
def A_ ( self , snake_case ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def A_ ( self , snake_case ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = []
UpperCAmelCase : Any = ""
UpperCAmelCase : int = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(snake_case ) + token
UpperCAmelCase : List[str] = True
UpperCAmelCase : Optional[int] = []
else:
current_sub_tokens.append(snake_case )
UpperCAmelCase : int = False
out_string += self.sp_model.decode(snake_case )
return out_string.strip()
def __getstate__( self ):
'''simple docstring'''
UpperCAmelCase : Tuple = self.__dict__.copy()
UpperCAmelCase : Any = None
return state
def __setstate__( self , snake_case ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
UpperCAmelCase : Tuple = {}
UpperCAmelCase : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def A_ ( self , snake_case , snake_case = None ):
'''simple docstring'''
if not os.path.isdir(snake_case ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
UpperCAmelCase : Tuple = os.path.join(
snake_case , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case , "wb" ) as fi:
UpperCAmelCase : str = self.sp_model.serialized_model_proto()
fi.write(snake_case )
return (out_vocab_file,)
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
a : Dict = logging.get_logger(__name__)
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : List[str] = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
UpperCAmelCase : Tuple = 192
UpperCAmelCase : str = 768
UpperCAmelCase : List[Any] = 12
UpperCAmelCase : List[Any] = 3
UpperCAmelCase : List[Any] = [800, 1333]
UpperCAmelCase : List[str] = False
elif yolos_name == "yolos_s_dWr":
UpperCAmelCase : Union[str, Any] = 330
UpperCAmelCase : Union[str, Any] = 14
UpperCAmelCase : Any = 6
UpperCAmelCase : int = 1320
elif "yolos_s" in yolos_name:
UpperCAmelCase : Union[str, Any] = 384
UpperCAmelCase : Dict = 1536
UpperCAmelCase : str = 12
UpperCAmelCase : List[str] = 6
elif "yolos_b" in yolos_name:
UpperCAmelCase : int = [800, 1344]
UpperCAmelCase : Optional[int] = 91
UpperCAmelCase : int = "huggingface/label-files"
UpperCAmelCase : Union[str, Any] = "coco-detection-id2label.json"
UpperCAmelCase : Optional[Any] = json.load(open(hf_hub_download(__magic_name__ , __magic_name__ , repo_type="dataset" ) , "r" ) )
UpperCAmelCase : str = {int(__magic_name__ ): v for k, v in idalabel.items()}
UpperCAmelCase : str = idalabel
UpperCAmelCase : Union[str, Any] = {v: k for k, v in idalabel.items()}
return config
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ = False ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCAmelCase : Tuple = state_dict.pop(F"blocks.{i}.attn.qkv.weight" )
UpperCAmelCase : List[Any] = state_dict.pop(F"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase : str = in_proj_weight[: config.hidden_size, :]
UpperCAmelCase : Optional[int] = in_proj_bias[: config.hidden_size]
UpperCAmelCase : Optional[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCAmelCase : int = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCAmelCase : str = in_proj_weight[-config.hidden_size :, :]
UpperCAmelCase : Tuple = in_proj_bias[-config.hidden_size :]
def lowercase ( __magic_name__ ):
'''simple docstring'''
if "backbone" in name:
UpperCAmelCase : int = name.replace("backbone" , "vit" )
if "cls_token" in name:
UpperCAmelCase : Dict = name.replace("cls_token" , "embeddings.cls_token" )
if "det_token" in name:
UpperCAmelCase : int = name.replace("det_token" , "embeddings.detection_tokens" )
if "mid_pos_embed" in name:
UpperCAmelCase : Tuple = name.replace("mid_pos_embed" , "encoder.mid_position_embeddings" )
if "pos_embed" in name:
UpperCAmelCase : int = name.replace("pos_embed" , "embeddings.position_embeddings" )
if "patch_embed.proj" in name:
UpperCAmelCase : str = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "blocks" in name:
UpperCAmelCase : Tuple = name.replace("blocks" , "encoder.layer" )
if "attn.proj" in name:
UpperCAmelCase : Tuple = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
UpperCAmelCase : Any = name.replace("attn" , "attention.self" )
if "norm1" in name:
UpperCAmelCase : int = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
UpperCAmelCase : List[str] = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
UpperCAmelCase : List[str] = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
UpperCAmelCase : Dict = name.replace("mlp.fc2" , "output.dense" )
if "class_embed" in name:
UpperCAmelCase : Any = name.replace("class_embed" , "class_labels_classifier" )
if "bbox_embed" in name:
UpperCAmelCase : Optional[int] = name.replace("bbox_embed" , "bbox_predictor" )
if "vit.norm" in name:
UpperCAmelCase : Tuple = name.replace("vit.norm" , "vit.layernorm" )
return name
def lowercase ( __magic_name__ , __magic_name__ ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
UpperCAmelCase : Optional[int] = orig_state_dict.pop(__magic_name__ )
if "qkv" in key:
UpperCAmelCase : str = key.split("." )
UpperCAmelCase : List[Any] = int(key_split[2] )
UpperCAmelCase : int = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
UpperCAmelCase : Optional[int] = val[:dim, :]
UpperCAmelCase : Union[str, Any] = val[
dim : dim * 2, :
]
UpperCAmelCase : Any = val[-dim:, :]
else:
UpperCAmelCase : Tuple = val[:dim]
UpperCAmelCase : List[str] = val[dim : dim * 2]
UpperCAmelCase : Any = val[-dim:]
else:
UpperCAmelCase : Union[str, Any] = val
return orig_state_dict
def lowercase ( ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCAmelCase : Tuple = Image.open(requests.get(__magic_name__ , stream=__magic_name__ ).raw )
return im
@torch.no_grad()
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = False ):
'''simple docstring'''
UpperCAmelCase : Tuple = get_yolos_config(__magic_name__ )
# load original state_dict
UpperCAmelCase : int = torch.load(__magic_name__ , map_location="cpu" )["model"]
# load 🤗 model
UpperCAmelCase : int = YolosForObjectDetection(__magic_name__ )
model.eval()
UpperCAmelCase : Dict = convert_state_dict(__magic_name__ , __magic_name__ )
model.load_state_dict(__magic_name__ )
# Check outputs on an image, prepared by YolosImageProcessor
UpperCAmelCase : Dict = 800 if yolos_name != "yolos_ti" else 512
UpperCAmelCase : int = YolosImageProcessor(format="coco_detection" , size=__magic_name__ )
UpperCAmelCase : List[Any] = image_processor(images=prepare_img() , return_tensors="pt" )
UpperCAmelCase : List[str] = model(**__magic_name__ )
UpperCAmelCase , UpperCAmelCase : Optional[int] = outputs.logits, outputs.pred_boxes
UpperCAmelCase , UpperCAmelCase : Optional[Any] = None, None
if yolos_name == "yolos_ti":
UpperCAmelCase : str = torch.tensor(
[[-3_9.5_0_2_2, -1_1.9_8_2_0, -1_7.6_8_8_8], [-2_9.9_5_7_4, -9.9_7_6_9, -1_7.7_6_9_1], [-4_2.3_2_8_1, -2_0.7_2_0_0, -3_0.6_2_9_4]] )
UpperCAmelCase : Tuple = torch.tensor(
[[0.4_0_2_1, 0.0_8_3_6, 0.7_9_7_9], [0.0_1_8_4, 0.2_6_0_9, 0.0_3_6_4], [0.1_7_8_1, 0.2_0_0_4, 0.2_0_9_5]] )
elif yolos_name == "yolos_s_200_pre":
UpperCAmelCase : Union[str, Any] = torch.tensor(
[[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] )
UpperCAmelCase : List[str] = torch.tensor(
[[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] )
elif yolos_name == "yolos_s_300_pre":
UpperCAmelCase : List[str] = torch.tensor(
[[-3_6.2_2_2_0, -1_4.4_3_8_5, -2_3.5_4_5_7], [-3_5.6_9_7_0, -1_4.7_5_8_3, -2_1.3_9_3_5], [-3_1.5_9_3_9, -1_3.6_0_4_2, -1_6.8_0_4_9]] )
UpperCAmelCase : Dict = torch.tensor(
[[0.7_6_1_4, 0.2_3_1_6, 0.4_7_2_8], [0.7_1_6_8, 0.4_4_9_5, 0.3_8_5_5], [0.4_9_9_6, 0.1_4_6_6, 0.9_9_9_6]] )
elif yolos_name == "yolos_s_dWr":
UpperCAmelCase : Dict = torch.tensor(
[[-4_2.8_6_6_8, -2_4.1_0_4_9, -4_1.1_6_9_0], [-3_4.7_4_5_6, -1_4.1_2_7_4, -2_4.9_1_9_4], [-3_3.7_8_9_8, -1_2.1_9_4_6, -2_5.6_4_9_5]] )
UpperCAmelCase : List[Any] = torch.tensor(
[[0.5_5_8_7, 0.2_7_7_3, 0.0_6_0_5], [0.5_0_0_4, 0.3_0_1_4, 0.9_9_9_4], [0.4_9_9_9, 0.1_5_4_8, 0.9_9_9_4]] )
elif yolos_name == "yolos_base":
UpperCAmelCase : str = torch.tensor(
[[-4_0.6_0_6_4, -2_4.3_0_8_4, -3_2.6_4_4_7], [-5_5.1_9_9_0, -3_0.7_7_1_9, -3_5.5_8_7_7], [-5_1.4_3_1_1, -3_3.3_5_0_7, -3_5.6_4_6_2]] )
UpperCAmelCase : Union[str, Any] = torch.tensor(
[[0.5_5_5_5, 0.2_7_9_4, 0.0_6_5_5], [0.9_0_4_9, 0.2_6_6_4, 0.1_8_9_4], [0.9_1_8_3, 0.1_9_8_4, 0.1_6_3_5]] )
else:
raise ValueError(F"Unknown yolos_name: {yolos_name}" )
assert torch.allclose(logits[0, :3, :3] , __magic_name__ , atol=1e-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , __magic_name__ , atol=1e-4 )
Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ )
print(F"Saving model {yolos_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(__magic_name__ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(__magic_name__ )
if push_to_hub:
UpperCAmelCase : int = {
"yolos_ti": "yolos-tiny",
"yolos_s_200_pre": "yolos-small",
"yolos_s_300_pre": "yolos-small-300",
"yolos_s_dWr": "yolos-small-dwr",
"yolos_base": "yolos-base",
}
print("Pushing to the hub..." )
UpperCAmelCase : Tuple = model_mapping[yolos_name]
image_processor.push_to_hub(__magic_name__ , organization="hustvl" )
model.push_to_hub(__magic_name__ , organization="hustvl" )
if __name__ == "__main__":
a : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--yolos_name",
default="yolos_s_200_pre",
type=str,
help=(
"Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
" 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
),
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
a : str = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
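# Example invocation (script and checkpoint file names are placeholders):
#   python convert_yolos_to_pytorch.py --yolos_name yolos_s_200_pre \
#       --checkpoint_path yolos_s_200_pre.pth --pytorch_dump_folder_path ./yolos-small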
'''simple docstring'''
from __future__ import annotations
def lowercase ( __magic_name__ ):
'''simple docstring'''
if len(__magic_name__ ) == 0:
return []
UpperCAmelCase , UpperCAmelCase : List[str] = min(__magic_name__ ), max(__magic_name__ )
UpperCAmelCase : int = int(max_value - min_value ) + 1
UpperCAmelCase : list[list] = [[] for _ in range(__magic_name__ )]
for i in my_list:
buckets[int(i - min_value )].append(__magic_name__ )
return [v for bucket in buckets for v in sorted(__magic_name__ )]
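# With integer offsets as bucket keys, the sort above runs in roughly
# O(n + k) time for n values spanning a range of size k, plus the cost of
# sorting each (typically tiny) bucket individually.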
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
'''simple docstring'''
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
a : Tuple = logging.getLogger(__name__)
def lowercase ( ):
'''simple docstring'''
UpperCAmelCase : Any = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times (tokenization + token_to_ids)." )
parser.add_argument("--file_path" , type=__magic_name__ , default="data/dump.txt" , help="The path to the data." )
parser.add_argument("--tokenizer_type" , type=__magic_name__ , default="bert" , choices=["bert", "roberta", "gpt2"] )
parser.add_argument("--tokenizer_name" , type=__magic_name__ , default="bert-base-uncased" , help="The tokenizer to use." )
parser.add_argument("--dump_file" , type=__magic_name__ , default="data/dump" , help="The dump file prefix." )
UpperCAmelCase : List[Any] = parser.parse_args()
logger.info(F"Loading Tokenizer ({args.tokenizer_name})" )
if args.tokenizer_type == "bert":
UpperCAmelCase : Any = BertTokenizer.from_pretrained(args.tokenizer_name )
UpperCAmelCase : Optional[int] = tokenizer.special_tokens_map["cls_token"] # `[CLS]`
UpperCAmelCase : Any = tokenizer.special_tokens_map["sep_token"] # `[SEP]`
elif args.tokenizer_type == "roberta":
UpperCAmelCase : List[Any] = RobertaTokenizer.from_pretrained(args.tokenizer_name )
UpperCAmelCase : Tuple = tokenizer.special_tokens_map["cls_token"] # `<s>`
UpperCAmelCase : Optional[int] = tokenizer.special_tokens_map["sep_token"] # `</s>`
elif args.tokenizer_type == "gpt2":
UpperCAmelCase : List[str] = GPTaTokenizer.from_pretrained(args.tokenizer_name )
UpperCAmelCase : Optional[Any] = tokenizer.special_tokens_map["bos_token"] # `<|endoftext|>`
UpperCAmelCase : List[Any] = tokenizer.special_tokens_map["eos_token"] # `<|endoftext|>`
logger.info(F"Loading text from {args.file_path}" )
with open(args.file_path , "r" , encoding="utf8" ) as fp:
UpperCAmelCase : str = fp.readlines()
logger.info("Start encoding" )
logger.info(F"{len(__magic_name__ )} examples to process." )
UpperCAmelCase : int = []
UpperCAmelCase : int = 0
UpperCAmelCase : Union[str, Any] = 1_0000
UpperCAmelCase : Union[str, Any] = time.time()
for text in data:
UpperCAmelCase : Dict = F"{bos} {text.strip()} {sep}"
UpperCAmelCase : Tuple = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
rslt.append(__magic_name__ )
iter += 1
if iter % interval == 0:
UpperCAmelCase : Dict = time.time()
logger.info(F"{iter} examples processed. - {(end-start):.2f}s/{interval}expl" )
UpperCAmelCase : Any = time.time()
logger.info("Finished binarization" )
logger.info(F"{len(__magic_name__ )} examples processed." )
UpperCAmelCase : str = F"{args.dump_file}.{args.tokenizer_name}.pickle"
UpperCAmelCase : List[str] = tokenizer.vocab_size
if vocab_size < (1 << 16):
UpperCAmelCase : int = [np.uint16(d ) for d in rslt]
else:
UpperCAmelCase : int = [np.int32(d ) for d in rslt]
random.shuffle(rslt_ )
logger.info(F"Dump to {dp_file}" )
with open(__magic_name__ , "wb" ) as handle:
pickle.dump(rslt_ , __magic_name__ , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
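# A minimal load-back sketch (the pickle path assumes the default --dump_file
# and --tokenizer_name values above):
import pickle

with open("data/dump.bert-base-uncased.pickle", "rb") as handle:
    token_id_sequences = pickle.load(handle)
print(f"{len(token_id_sequences)} encoded sequences loaded")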
| 311
| 1
|
'''simple docstring'''
from jiwer import compute_measures
import datasets
a : List[Any] = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
a : str = "\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n"
a : Union[str, Any] = "\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> wer = datasets.load_metric(\"wer\")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase__ ( datasets.Metric ):
"""simple docstring"""
def A_ ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[
"https://en.wikipedia.org/wiki/Word_error_rate",
] , )
def A_ ( self , snake_case=None , snake_case=None , snake_case=False ):
'''simple docstring'''
if concatenate_texts:
return compute_measures(snake_case , snake_case )["wer"]
else:
UpperCAmelCase : Dict = 0
UpperCAmelCase : Optional[Any] = 0
for prediction, reference in zip(snake_case , snake_case ):
UpperCAmelCase : Tuple = compute_measures(snake_case , snake_case )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
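# A usage sketch of the iterative branch above on a single hypothetical pair:
# one substituted word out of four reference words gives WER = 0.25.
from jiwer import compute_measures

measures = compute_measures("this is the reference", "this is the prediction")
incorrect = measures["substitutions"] + measures["deletions"] + measures["insertions"]
total = measures["substitutions"] + measures["deletions"] + measures["hits"]
print(incorrect / total)  # 0.25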
| 311
|
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
a : Tuple = ["gpt2"]
a : Dict = "gpt2"
if is_tf_available():
class UpperCamelCase__ ( tf.Module ):
"""simple docstring"""
def __init__( self , snake_case ):
'''simple docstring'''
super().__init__()
UpperCAmelCase : Tuple = tokenizer
UpperCAmelCase : List[str] = AutoConfig.from_pretrained(snake_case )
UpperCAmelCase : int = TFGPTaLMHeadModel.from_config(snake_case )
@tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name="text" ),) )
def A_ ( self , snake_case ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = self.tokenizer(snake_case )
UpperCAmelCase : Optional[int] = tokenized["input_ids"].to_tensor()
UpperCAmelCase : Optional[int] = tf.cast(input_ids_dense > 0 , tf.int32 )
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
UpperCAmelCase : List[Any] = self.model(input_ids=snake_case , attention_mask=snake_case )["logits"]
return outputs
@require_tf
@require_keras_nlp
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self ):
'''simple docstring'''
super().setUp()
UpperCAmelCase : Any = [GPTaTokenizer.from_pretrained(snake_case ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
UpperCAmelCase : Optional[Any] = [TFGPTaTokenizer.from_pretrained(snake_case ) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
UpperCAmelCase : Tuple = [
"This is a straightforward English test sentence.",
"This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
"Now we're going to add some Chinese: 一 二 三 一二三",
"And some much more rare Chinese: 齉 堃 齉堃",
"Je vais aussi écrire en français pour tester les accents",
"Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
]
UpperCAmelCase : Optional[Any] = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def A_ ( self ):
'''simple docstring'''
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in self.test_sentences:
UpperCAmelCase : List[Any] = tokenizer([test_inputs] , return_tensors="tf" )
UpperCAmelCase : Any = tf_tokenizer([test_inputs] )
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
UpperCAmelCase : Dict = python_outputs[key].numpy()
UpperCAmelCase : List[str] = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
self.assertTrue(tf.reduce_all(tf.cast(snake_case , tf.int64 ) == tf_outputs_values ) )
@slow
def A_ ( self ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
UpperCAmelCase : Optional[Any] = tf.function(snake_case )
for test_inputs in self.test_sentences:
UpperCAmelCase : List[str] = tf.constant(snake_case )
UpperCAmelCase : Dict = compiled_tokenizer(snake_case )
UpperCAmelCase : Union[str, Any] = tf_tokenizer(snake_case )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def A_ ( self ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
UpperCAmelCase : int = ModelToSave(tokenizer=snake_case )
UpperCAmelCase : Tuple = tf.convert_to_tensor([self.test_sentences[0]] )
UpperCAmelCase : str = model.serving(snake_case ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
UpperCAmelCase : Optional[int] = Path(snake_case ) / "saved.model"
tf.saved_model.save(snake_case , snake_case , signatures={"serving_default": model.serving} )
UpperCAmelCase : int = tf.saved_model.load(snake_case )
UpperCAmelCase : str = loaded_model.signatures["serving_default"](snake_case )["output_0"]
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
def A_ ( self ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
UpperCAmelCase : Any = tf.convert_to_tensor([self.test_sentences[0]] )
UpperCAmelCase : Tuple = tf_tokenizer(snake_case ) # Build model with some sample inputs
UpperCAmelCase : Union[str, Any] = tf_tokenizer.get_config()
UpperCAmelCase : str = TFGPTaTokenizer.from_config(snake_case )
UpperCAmelCase : Tuple = model_from_config(snake_case )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
def A_ ( self ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
UpperCAmelCase : List[str] = 1_2_3_1_2_3
for max_length in [3, 5, 1_0_2_4]:
UpperCAmelCase : Any = tf.convert_to_tensor([self.test_sentences[0]] )
UpperCAmelCase : Tuple = tf_tokenizer(snake_case , max_length=snake_case )
UpperCAmelCase : Union[str, Any] = out["input_ids"].numpy().shape[1]
assert out_length == max_length
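# A standalone usage sketch of the in-graph tokenizer under test: it runs as
# TensorFlow ops, which is why it can be traced, saved, and reloaded above.
import tensorflow as tf

from transformers.models.gpt2 import TFGPT2Tokenizer

tf_tokenizer = TFGPT2Tokenizer.from_pretrained("gpt2")
outputs = tf_tokenizer(tf.constant(["This is a straightforward English test sentence."]))
print(outputs["input_ids"])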
| 311
| 1
|
'''simple docstring'''
def lowercase ( input_1 , input_2 ):
'''simple docstring'''
return int((input_1, input_2).count(0 ) == 0 )
def lowercase ( ):
'''simple docstring'''
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
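# A minimal extension sketch (nand_gate is new code, not part of the original
# file), composing the gate above under the and_gate name used in the
# record's own asserts:
def nand_gate(input_1: int, input_2: int) -> int:
    return int(not and_gate(input_1, input_2))

assert nand_gate(1, 1) == 0
assert nand_gate(0, 1) == 1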
| 311
|
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
a : str = "docs/source/en/_toctree.yml"
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Dict = defaultdict(__magic_name__ )
for doc in model_doc:
counts[doc["local"]] += 1
UpperCAmelCase : List[Any] = [key for key, value in counts.items() if value > 1]
UpperCAmelCase : Dict = []
for duplicate_key in duplicates:
UpperCAmelCase : Union[str, Any] = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key} )
if len(__magic_name__ ) > 1:
raise ValueError(
F"{duplicate_key} is present several times in the documentation table of content at "
"`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
"others." )
# Only add this once
new_doc.append({"local": duplicate_key, "title": titles[0]} )
# Add non-duplicate keys
new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1] )
# Sort
return sorted(__magic_name__ , key=lambda __magic_name__ : s["title"].lower() )
def lowercase ( __magic_name__=False ):
'''simple docstring'''
with open(__magic_name__ , encoding="utf-8" ) as f:
UpperCAmelCase : Any = yaml.safe_load(f.read() )
# Get to the API doc
UpperCAmelCase : Optional[int] = 0
while content[api_idx]["title"] != "API":
api_idx += 1
UpperCAmelCase : Union[str, Any] = content[api_idx]["sections"]
# Then to the model doc
UpperCAmelCase : Any = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
UpperCAmelCase : str = api_doc[model_idx]["sections"]
UpperCAmelCase : Any = [(idx, section) for idx, section in enumerate(__magic_name__ ) if "sections" in section]
UpperCAmelCase : Optional[int] = False
for idx, modality_doc in modalities_docs:
UpperCAmelCase : int = modality_doc["sections"]
UpperCAmelCase : int = clean_model_doc_toc(__magic_name__ )
if old_modality_doc != new_modality_doc:
UpperCAmelCase : int = True
if overwrite:
UpperCAmelCase : Dict = new_modality_doc
if diff:
if overwrite:
UpperCAmelCase : Any = model_doc
UpperCAmelCase : Any = api_doc
with open(__magic_name__ , "w" , encoding="utf-8" ) as f:
f.write(yaml.dump(__magic_name__ , allow_unicode=__magic_name__ ) )
else:
raise ValueError(
"The model doc part of the table of content is not properly sorted, run `make style` to fix this." )
if __name__ == "__main__":
a : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
a : Optional[Any] = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
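# A standalone sketch (hypothetical entries) of the dedup-and-sort rule the
# cleaner above enforces: duplicate "local" keys collapse when their titles
# agree, then entries are ordered case-insensitively by title.
model_doc = [
    {"local": "model_doc/bert", "title": "BERT"},
    {"local": "model_doc/albert", "title": "ALBERT"},
    {"local": "model_doc/bert", "title": "BERT"},
]
deduped = list({doc["local"]: doc for doc in model_doc}.values())
print(sorted(deduped, key=lambda s: s["title"].lower()))
# [{'local': 'model_doc/albert', 'title': 'ALBERT'}, {'local': 'model_doc/bert', 'title': 'BERT'}]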
| 311
| 1
|
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class UpperCamelCase__ :
"""simple docstring"""
@staticmethod
def A_ ( *snake_case , **snake_case ):
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def A_ ( self , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : str = pipeline(
"zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" )
UpperCAmelCase : Union[str, Any] = [
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
]
return object_detector, examples
def A_ ( self , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : List[Any] = object_detector(examples[0] , threshold=0.0 )
UpperCAmelCase : Dict = len(snake_case )
self.assertGreater(snake_case , 0 )
self.assertEqual(
snake_case , [
{
"score": ANY(snake_case ),
"label": ANY(snake_case ),
"box": {"xmin": ANY(snake_case ), "ymin": ANY(snake_case ), "xmax": ANY(snake_case ), "ymax": ANY(snake_case )},
}
for i in range(snake_case )
] , )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
def A_ ( self ):
'''simple docstring'''
pass
@require_torch
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = pipeline(
"zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" )
UpperCAmelCase : Optional[Any] = object_detector(
"./tests/fixtures/tests_samples/COCO/000000039769.png" , candidate_labels=["cat", "remote", "couch"] , threshold=0.64 , )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
{"score": 0.7235, "label": "cat", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.7218, "label": "remote", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.7184, "label": "couch", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.6748, "label": "remote", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6656, "label": "cat", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6614, "label": "couch", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6456, "label": "remote", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
{"score": 0.642, "label": "remote", "box": {"xmin": 6_7, "ymin": 2_7_4, "xmax": 9_3, "ymax": 2_9_7}},
{"score": 0.6419, "label": "cat", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
] , )
UpperCAmelCase : Tuple = object_detector(
[
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
[
{"score": 0.7235, "label": "cat", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.7218, "label": "remote", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.7184, "label": "couch", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.6748, "label": "remote", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6656, "label": "cat", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6614, "label": "couch", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6456, "label": "remote", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
{"score": 0.642, "label": "remote", "box": {"xmin": 6_7, "ymin": 2_7_4, "xmax": 9_3, "ymax": 2_9_7}},
{"score": 0.6419, "label": "cat", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
]
] , )
@require_torch
@slow
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Tuple = pipeline("zero-shot-object-detection" )
UpperCAmelCase : Optional[int] = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}},
] , )
UpperCAmelCase : Union[str, Any] = object_detector(
[
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
] , )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
[
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}},
],
[
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}},
],
] , )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
def A_ ( self ):
'''simple docstring'''
pass
@require_torch
@slow
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Any = 0.2
UpperCAmelCase : Union[str, Any] = pipeline("zero-shot-object-detection" )
UpperCAmelCase : str = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , threshold=snake_case , )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
] , )
@require_torch
@slow
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Dict = 2
UpperCAmelCase : Optional[Any] = pipeline("zero-shot-object-detection" )
UpperCAmelCase : List[str] = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , top_k=snake_case , )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
] , )
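# A usage sketch of the pipeline exercised above, mirroring the slow tests
# (default checkpoint resolution is left to the pipeline):
from transformers import pipeline

detector = pipeline("zero-shot-object-detection")
predictions = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "remote", "couch"],
    threshold=0.2,
)
for prediction in predictions:
    print(prediction["label"], round(prediction["score"], 4), prediction["box"])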
| 311
|
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def lowercase ( __magic_name__ ):
'''simple docstring'''
for param in module.parameters():
UpperCAmelCase : Any = False
def lowercase ( ):
'''simple docstring'''
UpperCAmelCase : int = "cuda" if torch.cuda.is_available() else "cpu"
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
UpperCAmelCase : int = "mps"
if device == "mps":
print(
"WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
" errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
" with generations." )
return device
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : str = plt.imshow(__magic_name__ )
fig.axes.get_xaxis().set_visible(__magic_name__ )
fig.axes.get_yaxis().set_visible(__magic_name__ )
plt.show()
def lowercase ( ):
'''simple docstring'''
UpperCAmelCase : str = datetime.now()
UpperCAmelCase : Tuple = current_time.strftime("%H:%M:%S" )
return timestamp
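# A standalone sketch of the freeze helper above: setting requires_grad=False
# on every parameter excludes the module from gradient updates.
from torch import nn

module = nn.Linear(4, 2)
for param in module.parameters():
    param.requires_grad = False
assert all(not p.requires_grad for p in module.parameters())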
| 311
| 1
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
UpperCAmelCase__ = {
"vocab_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json",
},
"merges_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt",
},
"tokenizer_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json",
},
}
UpperCAmelCase__ = {
"gpt2": 1024,
"gpt2-medium": 1024,
"gpt2-large": 1024,
"gpt2-xl": 1024,
"distilgpt2": 1024,
}
class lowercase_ ( lowercase ):
'''simple docstring'''
__snake_case = VOCAB_FILES_NAMES
__snake_case = PRETRAINED_VOCAB_FILES_MAP
__snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__snake_case = ['''input_ids''', '''attention_mask''']
__snake_case = GPTaTokenizer
def __init__( self : Optional[Any] , __UpperCAmelCase : Optional[Any]=None , __UpperCAmelCase : int=None , __UpperCAmelCase : Dict=None , __UpperCAmelCase : List[Any]="<|endoftext|>" , __UpperCAmelCase : Optional[Any]="<|endoftext|>" , __UpperCAmelCase : Optional[int]="<|endoftext|>" , __UpperCAmelCase : List[Any]=False , **__UpperCAmelCase : Optional[Any] , ) ->List[str]:
"""simple docstring"""
super().__init__(
__UpperCAmelCase , __UpperCAmelCase , tokenizer_file=__UpperCAmelCase , unk_token=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , **__UpperCAmelCase , )
a = kwargs.pop('''add_bos_token''' , __UpperCAmelCase )
a = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , __UpperCAmelCase ) != add_prefix_space:
a = getattr(__UpperCAmelCase , pre_tok_state.pop('''type''' ) )
a = add_prefix_space
a = pre_tok_class(**__UpperCAmelCase )
a = add_prefix_space
def __lowerCAmelCase ( self : Dict , *__UpperCAmelCase : Optional[int] , **__UpperCAmelCase : Dict ) ->BatchEncoding:
"""simple docstring"""
a = kwargs.get('''is_split_into_words''' , __UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__UpperCAmelCase , **__UpperCAmelCase )
def __lowerCAmelCase ( self : int , *__UpperCAmelCase : Any , **__UpperCAmelCase : Any ) ->BatchEncoding:
"""simple docstring"""
a = kwargs.get('''is_split_into_words''' , __UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__UpperCAmelCase , **__UpperCAmelCase )
def __lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[str] = None ) ->Tuple[str]:
"""simple docstring"""
a = self._tokenizer.model.save(__UpperCAmelCase , name=__UpperCAmelCase )
return tuple(__UpperCAmelCase )
def __lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : "Conversation" ) ->List[int]:
"""simple docstring"""
a = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) + [self.eos_token_id] )
if len(__UpperCAmelCase ) > self.model_max_length:
a = input_ids[-self.model_max_length :]
return input_ids
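# A usage sketch (the "gpt2" checkpoint comes from the pretrained map above):
# add_prefix_space=True is exactly what the asserts in the encode methods
# require before pretokenized (is_split_into_words=True) input is accepted.
from transformers import GPT2TokenizerFast

tokenizer = GPT2TokenizerFast.from_pretrained("gpt2", add_prefix_space=True)
encoding = tokenizer(["Hello", "world"], is_split_into_words=True)
print(encoding["input_ids"])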
| 0
|
'''simple docstring'''
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
a : str = getLogger(__name__)
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = 8 , __magic_name__ = 1024 , __magic_name__="val" , __magic_name__=None , __magic_name__=False , __magic_name__="summarization" , __magic_name__=None , __magic_name__=1 , __magic_name__ = None , __magic_name__="" , **__magic_name__ , ):
'''simple docstring'''
UpperCAmelCase : List[Any] = str(__magic_name__ )
assert local_rank is not None
torch.distributed.init_process_group(backend="nccl" , rank=__magic_name__ )
UpperCAmelCase : List[str] = Path(__magic_name__ )
UpperCAmelCase : Dict = save_dir.joinpath(F"rank_{local_rank}_output.json" )
torch.cuda.set_device(__magic_name__ )
UpperCAmelCase : Optional[Any] = AutoModelForSeqaSeqLM.from_pretrained(__magic_name__ ).cuda()
if fpaa:
UpperCAmelCase : int = model.half()
# determine if we need to increase num_beams
use_task_specific_params(__magic_name__ , __magic_name__ ) # update config with task specific params
UpperCAmelCase : Dict = generate_kwargs.pop("num_beams" , model.config.num_beams ) # AttributeError risk?
if num_return_sequences > num_beams:
UpperCAmelCase : Optional[Any] = num_return_sequences
UpperCAmelCase : Dict = AutoTokenizer.from_pretrained(__magic_name__ )
logger.info(F"Inferred tokenizer type: {tokenizer.__class__}" ) # if this is wrong, check config.model_type.
if max_source_length is None:
UpperCAmelCase : Any = tokenizer.model_max_length
if prefix is None:
UpperCAmelCase : Tuple = prefix or getattr(model.config , "prefix" , "" ) or ""
UpperCAmelCase : Dict = SeqaSeqDataset(
__magic_name__ , __magic_name__ , __magic_name__ , max_target_length=1024 , type_path=__magic_name__ , n_obs=__magic_name__ , prefix=__magic_name__ , **__magic_name__ , )
# I set shuffle=True for a more accurate progress bar.
# If all the longest samples are first, the prog bar estimate is too high at the beginning.
UpperCAmelCase : int = ds.make_sortish_sampler(__magic_name__ , distributed=__magic_name__ , add_extra_examples=__magic_name__ , shuffle=__magic_name__ )
UpperCAmelCase : List[Any] = DataLoader(__magic_name__ , sampler=__magic_name__ , batch_size=__magic_name__ , collate_fn=ds.collate_fn )
UpperCAmelCase : Any = []
for batch in tqdm(__magic_name__ ):
UpperCAmelCase : List[Any] = model.generate(
input_ids=batch["input_ids"].to(model.device ) , attention_mask=batch["attention_mask"].to(model.device ) , num_return_sequences=__magic_name__ , num_beams=__magic_name__ , **__magic_name__ , )
UpperCAmelCase : Optional[int] = tokenizer.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ , clean_up_tokenization_spaces=__magic_name__ )
UpperCAmelCase : int = batch["ids"]
if num_return_sequences > 1:
UpperCAmelCase : List[Any] = chunks(__magic_name__ , __magic_name__ ) # batch size chunks, each of size num_return_seq
for i, pred in enumerate(__magic_name__ ):
results.append({"pred": pred, "id": ids[i].item()} )
save_json(__magic_name__ , __magic_name__ )
return results, sampler.num_replicas
def lowercase ( ):
'''simple docstring'''
UpperCAmelCase : str = argparse.ArgumentParser(
epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate" )
parser.add_argument("--data_dir" , type=__magic_name__ , help="like cnn_dm/test.source" )
parser.add_argument(
"--model_name" , type=__magic_name__ , help="like facebook/bart-large-cnn,t5-base, etc." , default="sshleifer/distilbart-xsum-12-3" , )
parser.add_argument("--save_dir" , type=__magic_name__ , help="where to save" , default="tmp_gen" )
parser.add_argument("--max_source_length" , type=__magic_name__ , default=__magic_name__ )
parser.add_argument(
"--type_path" , type=__magic_name__ , default="test" , help="which subset to evaluate typically train/val/test" )
parser.add_argument("--task" , type=__magic_name__ , default="summarization" , help="used for task_specific_params + metrics" )
parser.add_argument("--bs" , type=__magic_name__ , default=8 , required=__magic_name__ , help="batch size" )
parser.add_argument(
"--local_rank" , type=__magic_name__ , default=-1 , required=__magic_name__ , help="should be passed by distributed.launch" )
parser.add_argument(
"--n_obs" , type=__magic_name__ , default=__magic_name__ , required=__magic_name__ , help="How many observations. Defaults to all." )
parser.add_argument(
"--num_return_sequences" , type=__magic_name__ , default=1 , required=__magic_name__ , help="How many sequences to return" )
parser.add_argument(
"--sync_timeout" , type=__magic_name__ , default=600 , required=__magic_name__ , help="How long should master process wait for other processes to finish." , )
parser.add_argument("--src_lang" , type=__magic_name__ , default=__magic_name__ , required=__magic_name__ )
parser.add_argument("--tgt_lang" , type=__magic_name__ , default=__magic_name__ , required=__magic_name__ )
parser.add_argument(
"--prefix" , type=__magic_name__ , required=__magic_name__ , default=__magic_name__ , help="will be added to the begininng of src examples" )
parser.add_argument("--fp16" , action="store_true" )
parser.add_argument("--debug" , action="store_true" )
UpperCAmelCase : Union[str, Any] = time.time()
UpperCAmelCase , UpperCAmelCase : Dict = parser.parse_known_args()
UpperCAmelCase : Tuple = parse_numeric_n_bool_cl_kwargs(__magic_name__ )
if generate_kwargs and args.local_rank <= 0:
print(F"parsed the following generate kwargs: {generate_kwargs}" )
UpperCAmelCase : Union[str, Any] = Path(args.save_dir + "_tmp" )
Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ ) # this handles locking.
UpperCAmelCase : List[Any] = list(json_save_dir.glob("rank_*.json" ) )
if intermediate_files:
raise ValueError(F"Found files at {json_save_dir} please move or remove them." )
# In theory, a node could finish and save before another node hits this. If this happens, we can address later.
UpperCAmelCase : Optional[Any] = {}
if args.src_lang is not None:
UpperCAmelCase : List[str] = args.src_lang
if args.tgt_lang is not None:
UpperCAmelCase : Dict = args.tgt_lang
Path(args.save_dir ).mkdir(exist_ok=__magic_name__ )
UpperCAmelCase , UpperCAmelCase : str = eval_data_dir(
args.data_dir , __magic_name__ , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=__magic_name__ , **__magic_name__ , )
if args.local_rank <= 0:
UpperCAmelCase : List[str] = Path(args.save_dir )
save_dir.mkdir(exist_ok=__magic_name__ )
UpperCAmelCase : str = gather_results_from_each_node(__magic_name__ , __magic_name__ , args.sync_timeout )
UpperCAmelCase : Dict = combine_partial_results(__magic_name__ )
if args.num_return_sequences > 1:
UpperCAmelCase : int = save_dir.joinpath("pseudolabel_results.json" )
print(F"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/" )
save_json(__magic_name__ , __magic_name__ )
return
UpperCAmelCase : Dict = Path(args.data_dir ).joinpath(args.type_path + ".target" )
with open(__magic_name__ ) as f:
UpperCAmelCase : Dict = [x.rstrip() for x in f.readlines()][: len(__magic_name__ )]
# Calculate metrics, save metrics, and save _generations.txt
UpperCAmelCase : Optional[int] = "translation" in args.task
UpperCAmelCase : str = calculate_bleu if calc_bleu else calculate_rouge
UpperCAmelCase : Tuple = "bleu" if calc_bleu else "rouge"
UpperCAmelCase : Dict = score_fn(__magic_name__ , __magic_name__ )
UpperCAmelCase : Any = len(__magic_name__ )
UpperCAmelCase : Union[str, Any] = time.time() - start_time
UpperCAmelCase : Dict = round(runtime / metrics["n_obs"] , 4 )
UpperCAmelCase : Optional[Any] = num_replicas
# TODO(@stas00): add whatever metadata to metrics
UpperCAmelCase : Dict = save_dir.joinpath(F"{args.type_path}_{metric_name}.json" )
save_json(__magic_name__ , __magic_name__ , indent=__magic_name__ )
print(__magic_name__ )
write_txt_file(__magic_name__ , save_dir.joinpath(F"{args.type_path}_generations.txt" ) )
if args.debug:
write_txt_file(__magic_name__ , save_dir.joinpath(F"{args.type_path}.target" ) )
else:
shutil.rmtree(__magic_name__ )
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Tuple = []
for partial_result in partial_results:
records.extend(__magic_name__ )
UpperCAmelCase : Optional[Any] = sorted(__magic_name__ , key=lambda __magic_name__ : x["id"] )
UpperCAmelCase : List[Any] = [x["pred"] for x in records]
return preds
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Dict = time.time()
logger.info("waiting for all nodes to finish" )
UpperCAmelCase : Union[str, Any] = None
while (time.time() - start_wait) < timeout:
UpperCAmelCase : Dict = list(save_dir.glob("rank_*.json" ) )
if len(__magic_name__ ) < num_replicas:
continue
try:
# make sure all json files are fully saved
UpperCAmelCase : List[str] = lmap(__magic_name__ , __magic_name__ )
return json_data
except JSONDecodeError:
continue
else:
raise TimeoutError("Rank 0 gave up on waiting for other processes" )
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
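# A hedged launch sketch (the script filename and data paths are placeholders,
# not taken from the original file):
#
#   python -m torch.distributed.launch --nproc_per_node=2 run_distributed_eval.py \
#       --model_name sshleifer/distilbart-xsum-12-3 \
#       --data_dir xsum --save_dir tmp_gen --bs 16 --fp16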
| 311
| 0
|
'''simple docstring'''
def lowerCAmelCase_ ( snake_case_ : list[int] , snake_case_ : list[int] , snake_case_ : int ) -> bool:
'''simple docstring'''
return not any(
neighbour == 1 and colored_vertices[i] == color
for i, neighbour in enumerate(snake_case_ ) )
def lowerCAmelCase_ ( snake_case_ : list[list[int]] , snake_case_ : int , snake_case_ : list[int] , snake_case_ : int ) -> bool:
'''simple docstring'''
if index == len(snake_case_ ):
return True
# Recursive Step
for i in range(snake_case_ ):
if valid_coloring(graph[index] , snake_case_ , snake_case_ ):
# Color current vertex
UpperCAmelCase_ = i
# Validate coloring
if util_color(snake_case_ , snake_case_ , snake_case_ , index + 1 ):
return True
# Backtrack
UpperCAmelCase_ = -1
return False
def lowerCAmelCase_ ( snake_case_ : list[list[int]] , snake_case_ : int ) -> list[int]:
'''simple docstring'''
UpperCAmelCase_ = [-1] * len(snake_case_ )
if util_color(snake_case_ , snake_case_ , snake_case_ , 0 ):
return colored_vertices
return []
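# A minimal usage sketch (`color` is an assumed stand-in for the scrambled
# entry-point name above): a triangle graph admits no 2-coloring but does
# admit a 3-coloring.
triangle = [[0, 1, 1], [1, 0, 1], [1, 1, 0]]
# color(triangle, 2) -> []           (backtracking exhausts both colors)
# color(triangle, 3) -> [0, 1, 2]    (one valid assignment)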
| 1
|
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
a : List[str] = logging.get_logger(__name__)
a : Optional[Any] = ["model.decoder.embed_positions.weights"]
def lowercase ( __magic_name__ ):
'''simple docstring'''
if "emb" in name:
UpperCAmelCase : str = name.replace("emb" , "model.decoder.embed_tokens" )
if "transformer" in name:
UpperCAmelCase : List[str] = name.replace("transformer" , "model.decoder" )
if "cross_attention" in name:
UpperCAmelCase : int = name.replace("cross_attention" , "encoder_attn" )
if "linear1" in name:
UpperCAmelCase : List[Any] = name.replace("linear1" , "fc1" )
if "linear2" in name:
UpperCAmelCase : int = name.replace("linear2" , "fc2" )
if "norm1" in name:
UpperCAmelCase : Dict = name.replace("norm1" , "self_attn_layer_norm" )
if "norm_cross" in name:
UpperCAmelCase : Any = name.replace("norm_cross" , "encoder_attn_layer_norm" )
if "norm2" in name:
UpperCAmelCase : Union[str, Any] = name.replace("norm2" , "final_layer_norm" )
if "out_norm" in name:
UpperCAmelCase : Dict = name.replace("out_norm" , "model.decoder.layer_norm" )
if "linears" in name:
UpperCAmelCase : List[Any] = name.replace("linears" , "lm_heads" )
if "condition_provider.conditioners.description.output_proj" in name:
UpperCAmelCase : Any = name.replace("condition_provider.conditioners.description.output_proj" , "enc_to_dec_proj" )
return name
def lowercase ( __magic_name__ , __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Any = list(state_dict.keys() )
UpperCAmelCase : List[Any] = {}
for key in keys:
UpperCAmelCase : Any = state_dict.pop(__magic_name__ )
UpperCAmelCase : str = rename_keys(__magic_name__ )
if "in_proj_weight" in key:
# split fused qkv proj
UpperCAmelCase : Optional[int] = val[:hidden_size, :]
UpperCAmelCase : Optional[Any] = val[hidden_size : 2 * hidden_size, :]
UpperCAmelCase : Optional[Any] = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
UpperCAmelCase : str = val
else:
UpperCAmelCase : int = val
return state_dict, enc_dec_proj_state_dict
def lowercase ( __magic_name__ ):
'''simple docstring'''
if checkpoint == "small":
# default config values
UpperCAmelCase : List[Any] = 1024
UpperCAmelCase : Tuple = 24
UpperCAmelCase : Union[str, Any] = 16
elif checkpoint == "medium":
UpperCAmelCase : List[Any] = 1536
UpperCAmelCase : Optional[Any] = 48
UpperCAmelCase : List[str] = 24
elif checkpoint == "large":
UpperCAmelCase : List[Any] = 2048
UpperCAmelCase : str = 48
UpperCAmelCase : Optional[Any] = 32
else:
raise ValueError(F"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}." )
UpperCAmelCase : Tuple = MusicgenDecoderConfig(
hidden_size=__magic_name__ , ffn_dim=hidden_size * 4 , num_hidden_layers=__magic_name__ , num_attention_heads=__magic_name__ , )
return config
@torch.no_grad()
def lowercase ( __magic_name__ , __magic_name__=None , __magic_name__=None , __magic_name__="cpu" ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = MusicGen.get_pretrained(__magic_name__ , device=__magic_name__ )
UpperCAmelCase : List[str] = decoder_config_from_checkpoint(__magic_name__ )
UpperCAmelCase : Dict = fairseq_model.lm.state_dict()
UpperCAmelCase , UpperCAmelCase : List[str] = rename_state_dict(
__magic_name__ , hidden_size=decoder_config.hidden_size )
UpperCAmelCase : Any = TaEncoderModel.from_pretrained("t5-base" )
UpperCAmelCase : Any = EncodecModel.from_pretrained("facebook/encodec_32khz" )
UpperCAmelCase : int = MusicgenForCausalLM(__magic_name__ ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
UpperCAmelCase , UpperCAmelCase : Optional[int] = decoder.load_state_dict(__magic_name__ , strict=__magic_name__ )
for key in missing_keys.copy():
if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(__magic_name__ )
if len(__magic_name__ ) > 0:
raise ValueError(F"Missing key(s) in state_dict: {missing_keys}" )
if len(__magic_name__ ) > 0:
raise ValueError(F"Unexpected key(s) in state_dict: {unexpected_keys}" )
# init the composite model
UpperCAmelCase : List[Any] = MusicgenForConditionalGeneration(text_encoder=__magic_name__ , audio_encoder=__magic_name__ , decoder=__magic_name__ )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(__magic_name__ )
# check we can do a forward pass
UpperCAmelCase : Union[str, Any] = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
UpperCAmelCase : Optional[Any] = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
UpperCAmelCase : str = model(input_ids=__magic_name__ , decoder_input_ids=__magic_name__ ).logits
if logits.shape != (8, 1, 2048):
raise ValueError("Incorrect shape for logits" )
# now construct the processor
UpperCAmelCase : Dict = AutoTokenizer.from_pretrained("t5-base" )
UpperCAmelCase : List[str] = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" , padding_side="left" )
UpperCAmelCase : Dict = MusicgenProcessor(feature_extractor=__magic_name__ , tokenizer=__magic_name__ )
# set the appropriate bos/pad token ids
UpperCAmelCase : List[Any] = 2048
UpperCAmelCase : Tuple = 2048
# set other default generation config params
UpperCAmelCase : Tuple = int(30 * audio_encoder.config.frame_rate )
UpperCAmelCase : str = True
UpperCAmelCase : Tuple = 3.0
if pytorch_dump_folder is not None:
Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ )
logger.info(F"Saving model {checkpoint} to {pytorch_dump_folder}" )
model.save_pretrained(__magic_name__ )
processor.save_pretrained(__magic_name__ )
if repo_id:
logger.info(F"Pushing model {checkpoint} to {repo_id}" )
model.push_to_hub(__magic_name__ )
processor.push_to_hub(__magic_name__ )
if __name__ == "__main__":
a : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
a : int = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
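# A hedged generation sketch (assuming the "small" conversion was published as
# facebook/musicgen-small) for the composite model assembled above:
from transformers import AutoProcessor, MusicgenForConditionalGeneration

processor = AutoProcessor.from_pretrained("facebook/musicgen-small")
model = MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-small")
inputs = processor(text=["80s pop track with heavy drums"], padding=True, return_tensors="pt")
audio_values = model.generate(**inputs, max_new_tokens=256)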
| 311
| 0
|
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
lowerCamelCase : Tuple = R'\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `" / "`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `" // "`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `"wiki_dpr"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `"train"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `"compressed"`)\n The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and\n `"compressed"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a "dummy" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n'
@add_start_docstrings(lowercase_ )
class __lowerCAmelCase (lowercase_ ):
'''simple docstring'''
lowerCAmelCase__ : Any = """rag"""
lowerCAmelCase__ : List[Any] = True
def __init__(self : Dict , UpperCamelCase : List[Any]=None , UpperCamelCase : str=True , UpperCamelCase : List[Any]=None , UpperCamelCase : List[str]=None , UpperCamelCase : List[Any]=None , UpperCamelCase : str=None , UpperCamelCase : List[Any]=None , UpperCamelCase : str=" / " , UpperCamelCase : Union[str, Any]=" // " , UpperCamelCase : List[str]=5 , UpperCamelCase : Tuple=300 , UpperCamelCase : Optional[int]=768 , UpperCamelCase : int=8 , UpperCamelCase : str="wiki_dpr" , UpperCamelCase : Optional[Any]="train" , UpperCamelCase : Any="compressed" , UpperCamelCase : Dict=None , UpperCamelCase : List[Any]=None , UpperCamelCase : List[Any]=False , UpperCamelCase : str=False , UpperCamelCase : Optional[int]=0.0 , UpperCamelCase : str=True , UpperCamelCase : int=False , UpperCamelCase : Any=False , UpperCamelCase : Any=False , UpperCamelCase : List[str]=True , UpperCamelCase : Optional[int]=None , **UpperCamelCase : List[Any] , ):
'''simple docstring'''
super().__init__(
bos_token_id=UpperCamelCase , pad_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , decoder_start_token_id=UpperCamelCase , forced_eos_token_id=UpperCamelCase , is_encoder_decoder=UpperCamelCase , prefix=UpperCamelCase , vocab_size=UpperCamelCase , **UpperCamelCase , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
lowercase__ = kwargs.pop('''question_encoder''' )
lowercase__ = question_encoder_config.pop('''model_type''' )
lowercase__ = kwargs.pop('''generator''' )
lowercase__ = decoder_config.pop('''model_type''' )
from ..auto.configuration_auto import AutoConfig
lowercase__ = AutoConfig.for_model(UpperCamelCase , **UpperCamelCase )
lowercase__ = AutoConfig.for_model(UpperCamelCase , **UpperCamelCase )
lowercase__ = reduce_loss
lowercase__ = label_smoothing
lowercase__ = exclude_bos_score
lowercase__ = do_marginalize
lowercase__ = title_sep
lowercase__ = doc_sep
lowercase__ = n_docs
lowercase__ = max_combined_length
lowercase__ = dataset
lowercase__ = dataset_split
lowercase__ = index_name
lowercase__ = retrieval_vector_size
lowercase__ = retrieval_batch_size
lowercase__ = passages_path
lowercase__ = index_path
lowercase__ = use_dummy_dataset
lowercase__ = output_retrieved
lowercase__ = do_deduplication
lowercase__ = use_cache
if self.forced_eos_token_id is None:
lowercase__ = getattr(self.generator , '''forced_eos_token_id''' , UpperCamelCase )
@classmethod
def UpperCamelCase__ (cls : Optional[int] , UpperCamelCase : PretrainedConfig , UpperCamelCase : PretrainedConfig , **UpperCamelCase : int ):
'''simple docstring'''
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **UpperCamelCase )
def UpperCamelCase__ (self : Optional[Any] ):
'''simple docstring'''
lowercase__ = copy.deepcopy(self.__dict__ )
lowercase__ = self.question_encoder.to_dict()
lowercase__ = self.generator.to_dict()
lowercase__ = self.__class__.model_type
return output
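# A construction sketch using the classmethod above (the two checkpoint ids
# are an assumption, chosen to match the models RAG was released with):
from transformers import AutoConfig, RagConfig

question_encoder_config = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
generator_config = AutoConfig.from_pretrained("facebook/bart-large")
rag_config = RagConfig.from_question_encoder_generator_configs(
    question_encoder_config, generator_config, n_docs=5
)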
| 2
|
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : List[str] = inspect.getfile(accelerate.test_utils )
UpperCAmelCase : Tuple = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] )
UpperCAmelCase : Optional[int] = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ["scripts", "test_distributed_data_loop.py"] )
UpperCAmelCase : Tuple = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_ops.py"] )
@require_multi_gpu
def A_ ( self ):
'''simple docstring'''
print(f"Found {torch.cuda.device_count()} devices." )
UpperCAmelCase : Any = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(snake_case , env=os.environ.copy() )
@require_multi_gpu
def A_ ( self ):
'''simple docstring'''
print(f"Found {torch.cuda.device_count()} devices." )
UpperCAmelCase : Tuple = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
print(f"Command: {cmd}" )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(snake_case , env=os.environ.copy() )
@require_multi_gpu
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(snake_case , env=os.environ.copy() )
@require_multi_gpu
def A_ ( self ):
'''simple docstring'''
print(f"Found {torch.cuda.device_count()} devices, using 2 devices only" )
UpperCAmelCase : str = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices="0,1" ):
execute_subprocess_async(snake_case , env=os.environ.copy() )
if __name__ == "__main__":
a : Union[str, Any] = Accelerator()
a : str = (accelerator.state.process_index + 2, 10)
a : List[str] = torch.randint(0, 10, shape).to(accelerator.device)
a : Optional[int] = ""
a : int = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
a : List[Any] = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
a : List[str] = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
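# A single-process sketch of the invariant tested above: with one process the
# local size is already the maximum across ranks, so pad_across_processes is
# a no-op on both shape and values.
import torch
from accelerate import Accelerator

accelerator = Accelerator()
tensor = torch.ones(3, 10)
padded = accelerator.pad_across_processes(tensor)
assert padded.shape == tensor.shape and torch.equal(padded, tensor)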
| 311
| 0
|
'''simple docstring'''
lowercase : str = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'
def lowerCAmelCase_ ( snake_case__ ):
'''simple docstring'''
if not isinstance(snake_case__ , snake_case__ ):
A : Union[str, Any] = F'a bytes-like object is required, not \'{data.__class__.__name__}\''
raise TypeError(snake_case__ )
A : Union[str, Any] = ''''''.join(bin(snake_case__ )[2:].zfill(8 ) for byte in data )
A : List[str] = len(snake_case__ ) % 6 != 0
if padding_needed:
# The padding that will be added later
A : Union[str, Any] = B'''=''' * ((6 - len(snake_case__ ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(snake_case__ ) % 6)
else:
A : Optional[Any] = B''''''
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(snake_case__ ) , 6 ) ).encode()
+ padding
)
def lowerCAmelCase_ ( snake_case__ ):
'''simple docstring'''
if not isinstance(snake_case__ , snake_case__ ) and not isinstance(snake_case__ , snake_case__ ):
A : int = (
'''argument should be a bytes-like object or ASCII string, '''
F'not \'{encoded_data.__class__.__name__}\''
)
raise TypeError(snake_case__ )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(snake_case__ , snake_case__ ):
try:
A : List[str] = encoded_data.decode('''utf-8''' )
except UnicodeDecodeError:
raise ValueError('''base64 encoded data should only contain ASCII characters''' )
A : Any = encoded_data.count('''=''' )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(snake_case__ ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
A : int = encoded_data[:-padding]
A : List[str] = ''''''.join(
bin(B64_CHARSET.index(snake_case__ ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
A : int = ''''''.join(
bin(B64_CHARSET.index(snake_case__ ) )[2:].zfill(6 ) for char in encoded_data )
A : List[str] = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(snake_case__ ) , 8 )
]
return bytes(snake_case__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
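# Editor's note: a self-contained sanity check of the bit-stream approach above
# against Python's standard library; the charset is repeated locally so this
# sketch runs on its own.
import base64

charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
for sample in (b"", b"f", b"fo", b"foo", b"hello world"):
    bits = "".join(bin(byte)[2:].zfill(8) for byte in sample)
    if len(bits) % 6:
        bits += "0" * (6 - len(bits) % 6)
    chars = "".join(charset[int(bits[i : i + 6], 2)] for i in range(0, len(bits), 6))
    pad = "=" * (-len(sample) % 3)  # 0, 1 or 2 '=' signs depending on len(sample) % 3
    assert (chars + pad).encode() == base64.b64encode(sample)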
| 3
|
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class UpperCamelCase__ :
"""simple docstring"""
@staticmethod
def A_ ( *snake_case , **snake_case ):
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def A_ ( self , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : str = pipeline(
"zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" )
UpperCAmelCase : Union[str, Any] = [
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
]
return object_detector, examples
def A_ ( self , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : List[Any] = object_detector(examples[0] , threshold=0.0 )
UpperCAmelCase : Dict = len(snake_case )
self.assertGreater(snake_case , 0 )
self.assertEqual(
snake_case , [
{
"score": ANY(snake_case ),
"label": ANY(snake_case ),
"box": {"xmin": ANY(snake_case ), "ymin": ANY(snake_case ), "xmax": ANY(snake_case ), "ymax": ANY(snake_case )},
}
for i in range(snake_case )
] , )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
def A_ ( self ):
'''simple docstring'''
pass
@require_torch
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = pipeline(
"zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" )
UpperCAmelCase : Optional[Any] = object_detector(
"./tests/fixtures/tests_samples/COCO/000000039769.png" , candidate_labels=["cat", "remote", "couch"] , threshold=0.64 , )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
{"score": 0.7235, "label": "cat", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.7218, "label": "remote", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.7184, "label": "couch", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.6748, "label": "remote", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6656, "label": "cat", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6614, "label": "couch", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6456, "label": "remote", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
{"score": 0.642, "label": "remote", "box": {"xmin": 6_7, "ymin": 2_7_4, "xmax": 9_3, "ymax": 2_9_7}},
{"score": 0.6419, "label": "cat", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
] , )
UpperCAmelCase : Tuple = object_detector(
[
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
[
{"score": 0.7235, "label": "cat", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.7218, "label": "remote", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.7184, "label": "couch", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.6748, "label": "remote", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6656, "label": "cat", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6614, "label": "couch", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6456, "label": "remote", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
{"score": 0.642, "label": "remote", "box": {"xmin": 6_7, "ymin": 2_7_4, "xmax": 9_3, "ymax": 2_9_7}},
{"score": 0.6419, "label": "cat", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
]
] , )
@require_torch
@slow
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Tuple = pipeline("zero-shot-object-detection" )
UpperCAmelCase : Optional[int] = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}},
] , )
UpperCAmelCase : Union[str, Any] = object_detector(
[
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
] , )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
[
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}},
],
[
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}},
],
] , )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
def A_ ( self ):
'''simple docstring'''
pass
@require_torch
@slow
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Any = 0.2
UpperCAmelCase : Union[str, Any] = pipeline("zero-shot-object-detection" )
UpperCAmelCase : str = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , threshold=snake_case , )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
] , )
@require_torch
@slow
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Dict = 2
UpperCAmelCase : Optional[Any] = pipeline("zero-shot-object-detection" )
UpperCAmelCase : List[str] = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , top_k=snake_case , )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
] , )
| 311
| 0
|
'''simple docstring'''
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__snake_case ="""▁"""
__snake_case =get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
lowerCamelCase : int = BertGenerationTokenizer
lowerCamelCase : Union[str, Any] = False
lowerCamelCase : Dict = True
def __UpperCAmelCase ( self : int ) -> int:
super().setUp()
lowerCAmelCase = BertGenerationTokenizer(UpperCAmelCase__ , keep_accents=UpperCAmelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCAmelCase ( self : str ) -> List[Any]:
lowerCAmelCase = '<s>'
lowerCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__ ) , UpperCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__ ) , UpperCAmelCase__ )
def __UpperCAmelCase ( self : List[Any] ) -> Tuple:
lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<unk>' )
self.assertEqual(vocab_keys[1] , '<s>' )
self.assertEqual(vocab_keys[-1] , '<pad>' )
self.assertEqual(len(UpperCAmelCase__ ) , 1_0_0_2 )
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0 )
def __UpperCAmelCase ( self : str ) -> Optional[int]:
lowerCAmelCase = BertGenerationTokenizer(UpperCAmelCase__ , keep_accents=UpperCAmelCase__ )
lowerCAmelCase = tokenizer.tokenize('This is a test' )
self.assertListEqual(UpperCAmelCase__ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] , )
lowerCAmelCase = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
UpperCAmelCase__ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
lowerCAmelCase = tokenizer.convert_tokens_to_ids(UpperCAmelCase__ )
self.assertListEqual(
UpperCAmelCase__ , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] , )
lowerCAmelCase = tokenizer.convert_ids_to_tokens(UpperCAmelCase__ )
self.assertListEqual(
UpperCAmelCase__ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
def __UpperCAmelCase ( self : Tuple ) -> int:
return BertGenerationTokenizer.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
@slow
def __UpperCAmelCase ( self : Tuple ) -> Optional[int]:
lowerCAmelCase = 'Hello World!'
lowerCAmelCase = [1_8_5_3_6, 2_2_6_0, 1_0_1]
self.assertListEqual(UpperCAmelCase__ , self.big_tokenizer.encode(UpperCAmelCase__ ) )
@slow
def __UpperCAmelCase ( self : Dict ) -> int:
lowerCAmelCase = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
lowerCAmelCase = [
8_7_1,
4_1_9,
3_5_8,
9_4_6,
9_9_1,
2_5_2_1,
4_5_2,
3_5_8,
1_3_5_7,
3_8_7,
7_7_5_1,
3_5_3_6,
1_1_2,
9_8_5,
4_5_6,
1_2_6,
8_6_5,
9_3_8,
5_4_0_0,
5_7_3_4,
4_5_8,
1_3_6_8,
4_6_7,
7_8_6,
2_4_6_2,
5_2_4_6,
1_1_5_9,
6_3_3,
8_6_5,
4_5_1_9,
4_5_7,
5_8_2,
8_5_2,
2_5_5_7,
4_2_7,
9_1_6,
5_0_8,
4_0_5,
3_4_3_2_4,
4_9_7,
3_9_1,
4_0_8,
1_1_3_4_2,
1_2_4_4,
3_8_5,
1_0_0,
9_3_8,
9_8_5,
4_5_6,
5_7_4,
3_6_2,
1_2_5_9_7,
3_2_0_0,
3_1_2_9,
1_1_7_2,
]
self.assertListEqual(UpperCAmelCase__ , self.big_tokenizer.encode(UpperCAmelCase__ ) )
@require_torch
@slow
def __UpperCAmelCase ( self : Tuple ) -> Optional[int]:
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
lowerCAmelCase = list(self.big_tokenizer.get_vocab().keys() )[:1_0]
lowerCAmelCase = ' '.join(UpperCAmelCase__ )
lowerCAmelCase = self.big_tokenizer.encode_plus(UpperCAmelCase__ , return_tensors='pt' , return_token_type_ids=UpperCAmelCase__ )
lowerCAmelCase = self.big_tokenizer.batch_encode_plus(
[sequence + ' ' + sequence] , return_tensors='pt' , return_token_type_ids=UpperCAmelCase__ )
lowerCAmelCase = BertGenerationConfig()
lowerCAmelCase = BertGenerationEncoder(UpperCAmelCase__ )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**UpperCAmelCase__ )
model(**UpperCAmelCase__ )
@slow
def __UpperCAmelCase ( self : Dict ) -> List[str]:
# fmt: off
lowerCAmelCase = {'input_ids': [[3_9_2_8_6, 4_5_8, 3_6_3_3_5, 2_0_0_1, 4_5_6, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 7_7_4_6, 1_7_4_1, 1_1_1_5_7, 3_9_1, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 3_9_6_7, 3_5_4_1_2, 1_1_3, 4_9_3_6, 1_0_9, 3_8_7_0, 2_3_7_7, 1_1_3, 3_0_0_8_4, 4_5_7_2_0, 4_5_8, 1_3_4, 1_7_4_9_6, 1_1_2, 5_0_3, 1_1_6_7_2, 1_1_3, 1_1_8, 1_1_2, 5_6_6_5, 1_3_3_4_7, 3_8_6_8_7, 1_1_2, 1_4_9_6, 3_1_3_8_9, 1_1_2, 3_2_6_8, 4_7_2_6_4, 1_3_4, 9_6_2, 1_1_2, 1_6_3_7_7, 8_0_3_5, 2_3_1_3_0, 4_3_0, 1_2_1_6_9, 1_5_5_1_8, 2_8_5_9_2, 4_5_8, 1_4_6, 4_1_6_9_7, 1_0_9, 3_9_1, 1_2_1_6_9, 1_5_5_1_8, 1_6_6_8_9, 4_5_8, 1_4_6, 4_1_3_5_8, 1_0_9, 4_5_2, 7_2_6, 4_0_3_4, 1_1_1, 7_6_3, 3_5_4_1_2, 5_0_8_2, 3_8_8, 1_9_0_3, 1_1_1, 9_0_5_1, 3_9_1, 2_8_7_0, 4_8_9_1_8, 1_9_0_0, 1_1_2_3, 5_5_0, 9_9_8, 1_1_2, 9_5_8_6, 1_5_9_8_5, 4_5_5, 3_9_1, 4_1_0, 2_2_9_5_5, 3_7_6_3_6, 1_1_4], [4_4_8, 1_7_4_9_6, 4_1_9, 3_6_6_3, 3_8_5, 7_6_3, 1_1_3, 2_7_5_3_3, 2_8_7_0, 3_2_8_3, 1_3_0_4_3, 1_6_3_9, 2_4_7_1_3, 5_2_3, 6_5_6, 2_4_0_1_3, 1_8_5_5_0, 2_5_2_1, 5_1_7, 2_7_0_1_4, 2_1_2_4_4, 4_2_0, 1_2_1_2, 1_4_6_5, 3_9_1, 9_2_7, 4_8_3_3, 3_8_8, 5_7_8, 1_1_7_8_6, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_8_4, 2_1_6_9, 7_6_8_7, 2_1_9_3_2, 1_8_1_4_6, 7_2_6, 3_6_3, 1_7_0_3_2, 3_3_9_1, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase__ , model_name='google/bert_for_seq_generation_L-24_bbc_encoder' , revision='c817d1fd1be2ffa69431227a1fe320544943d4db' , )
| 4
|
'''simple docstring'''
def lowercase ( __magic_name__ ):
'''simple docstring'''
if number > 0:
raise ValueError("input must be a negative integer" )
UpperCAmelCase : List[Any] = len(bin(__magic_name__ )[3:] )
UpperCAmelCase : Optional[Any] = bin(abs(__magic_name__ ) - (1 << binary_number_length) )[3:]
UpperCAmelCase : Tuple = (
(
"1"
+ "0" * (binary_number_length - len(__magic_name__ ))
+ twos_complement_number
)
if number < 0
else "0"
)
return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
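# Editor's note: a minimal cross-check of the function above using Python's
# modular arithmetic directly: the n-bit two's complement pattern of a negative
# x is just x mod 2**n.
def twos_complement_demo(number: int, n_bits: int) -> str:
    assert number < 0
    return format(number % (1 << n_bits), f"0{n_bits}b")

assert twos_complement_demo(-5, 4) == "1011"      # the function above returns "0b1011"
assert twos_complement_demo(-1, 8) == "11111111"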
| 311
| 0
|
from __future__ import annotations
from collections import namedtuple
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case ) -> tuple:
"""simple docstring"""
_lowercase =namedtuple('''result''' , '''name value''' )
if (voltage, current, power).count(0 ) != 1:
raise ValueError('''Only one argument must be 0''' )
elif power < 0:
raise ValueError(
'''Power cannot be negative in any electrical/electronics system''' )
elif voltage == 0:
return result('''voltage''' , power / current )
elif current == 0:
return result('''current''' , power / voltage )
elif power == 0:
return result('''power''' , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
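# Editor's note: a clean, self-contained sketch of the routine above; it solves
# the DC power relation P = V * I for whichever argument is passed as 0, and
# omits the negative-power validation of the original for brevity.
from collections import namedtuple

Result = namedtuple("Result", "name value")

def electric_power_demo(voltage: float, current: float, power: float) -> Result:
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Exactly one argument must be 0")
    if voltage == 0:
        return Result("voltage", power / current)   # V = P / I
    if current == 0:
        return Result("current", power / voltage)   # I = P / V
    return Result("power", round(abs(voltage * current), 2))  # P = V * I

assert electric_power_demo(voltage=0, current=2, power=5) == Result("voltage", 2.5)
assert electric_power_demo(voltage=2, current=4, power=0) == Result("power", 8)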
| 5
|
'''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
a : int = datasets.load_iris()
a : Union[str, Any] = np.array(data["data"])
a : Optional[Any] = np.array(data["target"])
a : List[Any] = data["target_names"]
a , a , a , a : Dict = train_test_split(X, y)
def lowercase ( __magic_name__ , __magic_name__ ):
'''simple docstring'''
return np.linalg.norm(np.array(__magic_name__ ) - np.array(__magic_name__ ) )
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=5 ):
'''simple docstring'''
UpperCAmelCase : int = zip(__magic_name__ , __magic_name__ )
# List of distances of all points from the point to be classified
UpperCAmelCase : List[Any] = []
for data_point in data:
UpperCAmelCase : List[str] = euclidean_distance(data_point[0] , __magic_name__ )
distances.append((distance, data_point[1]) )
# Choosing 'k' points with the least distances.
UpperCAmelCase : Union[str, Any] = [i[1] for i in sorted(__magic_name__ )[:k]]
# Most commonly occurring class among them
# is the class into which the point is classified
UpperCAmelCase : List[str] = Counter(__magic_name__ ).most_common(1 )[0][0]
return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
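# Editor's note: a dependency-free sketch of the k-nearest-neighbours vote used
# above, run on a tiny hand-made dataset instead of sklearn's iris split.
from collections import Counter

def knn_predict(train, point, k=3):
    """train: list of ((features...), label); returns the majority label among
    the k training points closest to `point` by Euclidean distance."""
    dists = sorted(
        (sum((a - b) ** 2 for a, b in zip(feats, point)) ** 0.5, label)
        for feats, label in train
    )
    return Counter(label for _, label in dists[:k]).most_common(1)[0][0]

toy = [((0, 0), "a"), ((0, 1), "a"), ((1, 0), "a"), ((5, 5), "b"), ((5, 6), "b")]
assert knn_predict(toy, (0.2, 0.2)) == "a"
assert knn_predict(toy, (5.1, 5.4)) == "b"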
| 311
| 0
|
from collections.abc import Generator
def __lowerCAmelCase ( ) -> Generator[int, None, None]:
__a , __a = 0, 1
while True:
__a , __a = b, a + b
yield b
def __lowerCAmelCase ( a__ = 1000 ) -> int:
__a = 1
__a = fibonacci_generator()
while len(str(next(a__ ) ) ) < n:
answer += 1
return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
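# Editor's note: the same computation (index of the first Fibonacci number with
# n digits) written without a generator, useful for cross-checking the
# off-by-one handling above.
def first_fib_index_with_n_digits(n: int) -> int:
    a, b, index = 1, 1, 2
    while len(str(b)) < n:
        a, b = b, a + b
        index += 1
    return index

assert first_fib_index_with_n_digits(2) == 7    # F(7) = 13
assert first_fib_index_with_n_digits(3) == 12   # F(12) = 144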
| 6
|
'''simple docstring'''
def lowercase ( __magic_name__ ):
'''simple docstring'''
if number < 0:
raise ValueError("number must not be negative" )
return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
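# Editor's note: why the bit trick above works: subtracting 1 flips the lowest
# set bit and every bit below it, so `n & (n - 1)` is zero exactly when n has
# at most one set bit. A brute-force cross-check (this sketch also excludes 0,
# which the function above accepts):
def is_power_of_two_demo(n: int) -> bool:
    return n > 0 and n & (n - 1) == 0

assert [n for n in range(1, 70) if is_power_of_two_demo(n)] == [1, 2, 4, 8, 16, 32, 64]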
| 311
| 0
|
def _snake_case( SCREAMING_SNAKE_CASE__ : int ) -> int:
'''simple docstring'''
if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) or number < 0:
raise ValueError('Input must be a non-negative integer' )
A__ = 0
while number:
# `number &= number - 1` clears the lowest set bit on each iteration, so
# instead of looping over all 32 bit positions and checking each for a 1,
# the loop runs only once per set bit (popcount times)
number &= number - 1
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
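# Editor's note: cross-checking the Brian Kernighan trick above against the
# straightforward string-based population count.
def popcount_kernighan(n: int) -> int:
    count = 0
    while n:
        n &= n - 1  # clear the lowest set bit
        count += 1
    return count

assert all(popcount_kernighan(n) == bin(n).count("1") for n in range(1024))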
| 7
|
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def lowercase ( __magic_name__ , __magic_name__=10 ):
'''simple docstring'''
UpperCAmelCase : Tuple = []
for _ in range(__magic_name__ ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def lowercase ( __magic_name__ , __magic_name__=10 ):
'''simple docstring'''
UpperCAmelCase : List[str] = []
for step in range(__magic_name__ ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase : Any = os.path.join(__magic_name__ , "schedule.bin" )
torch.save(scheduler.state_dict() , __magic_name__ )
UpperCAmelCase : Any = torch.load(__magic_name__ )
scheduler.load_state_dict(__magic_name__ )
return lrs
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self , snake_case , snake_case , snake_case ):
'''simple docstring'''
self.assertEqual(len(snake_case ) , len(snake_case ) )
for a, b in zip(snake_case , snake_case ):
self.assertAlmostEqual(snake_case , snake_case , delta=snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Dict = torch.tensor([0.1, -0.2, -0.1] , requires_grad=snake_case )
UpperCAmelCase : Any = torch.tensor([0.4, 0.2, -0.5] )
UpperCAmelCase : Any = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
UpperCAmelCase : List[str] = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 )
for _ in range(1_0_0 ):
UpperCAmelCase : List[Any] = criterion(snake_case , snake_case )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Tuple = torch.tensor([0.1, -0.2, -0.1] , requires_grad=snake_case )
UpperCAmelCase : int = torch.tensor([0.4, 0.2, -0.5] )
UpperCAmelCase : str = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
UpperCAmelCase : str = Adafactor(
params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=snake_case , weight_decay=0.0 , relative_step=snake_case , scale_parameter=snake_case , warmup_init=snake_case , )
for _ in range(1_0_0_0 ):
UpperCAmelCase : str = criterion(snake_case , snake_case )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = nn.Linear(50 , 50 ) if is_torch_available() else None
SCREAMING_SNAKE_CASE__ : List[Any] = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None
SCREAMING_SNAKE_CASE__ : Optional[int] = 10
def A_ ( self , snake_case , snake_case , snake_case , snake_case=None ):
'''simple docstring'''
self.assertEqual(len(snake_case ) , len(snake_case ) )
for a, b in zip(snake_case , snake_case ):
self.assertAlmostEqual(snake_case , snake_case , delta=snake_case , msg=snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : int = {"num_warmup_steps": 2, "num_training_steps": 1_0}
# schedulers doct format
# function: (sched_args_dict, expected_learning_rates)
UpperCAmelCase : int = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"num_warmup_steps": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, "num_cycles": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, "power": 2.0, "lr_end": 1e-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"num_warmup_steps": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
for scheduler_func, data in scheds.items():
UpperCAmelCase , UpperCAmelCase : Any = data
UpperCAmelCase : Tuple = scheduler_func(self.optimizer , **snake_case )
self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
UpperCAmelCase : List[str] = unwrap_schedule(snake_case , self.num_steps )
self.assertListAlmostEqual(
snake_case , snake_case , tol=1e-2 , msg=f"failed for {scheduler_func} in normal scheduler" , )
UpperCAmelCase : Optional[Any] = scheduler_func(self.optimizer , **snake_case )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(snake_case ) # wrap to test picklability of the schedule
UpperCAmelCase : Tuple = unwrap_and_save_reload_schedule(snake_case , self.num_steps )
self.assertListEqual(snake_case , snake_case , msg=f"failed for {scheduler_func} in save and reload" )
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self , snake_case ):
'''simple docstring'''
UpperCAmelCase : List[str] = fn
def __call__( self , *snake_case , **snake_case ):
'''simple docstring'''
return self.fn(*snake_case , **snake_case )
@classmethod
def A_ ( self , snake_case ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = list(map(self , scheduler.lr_lambdas ) )
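# Editor's note: the linear warmup/decay rule that the expected learning rates
# above encode (base lr 10.0, 2 warmup steps, 10 training steps), written out
# as the plain multiplier function these LambdaLR-style schedules apply.
def linear_schedule_multiplier(step: int, num_warmup: int, num_training: int) -> float:
    if step < num_warmup:
        return step / max(1, num_warmup)
    return max(0.0, (num_training - step) / max(1, num_training - num_warmup))

lrs = [10.0 * linear_schedule_multiplier(step, 2, 10) for step in range(10)]
assert lrs == [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25]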
| 311
| 0
|
import random
class snake_case_ :
'''simple docstring'''
@staticmethod
def snake_case__( _UpperCamelCase : str ) ->tuple[list[int], list[int]]:
snake_case_ = [ord(_UpperCamelCase ) for i in text]
snake_case_ = []
snake_case_ = []
for i in plain:
snake_case_ = random.randint(1 , 3_0_0 )
snake_case_ = (i + k) * k
cipher.append(_UpperCamelCase )
key.append(_UpperCamelCase )
return cipher, key
@staticmethod
def snake_case__( _UpperCamelCase : list[int] , _UpperCamelCase : list[int] ) ->str:
snake_case_ = []
for i in range(len(_UpperCamelCase ) ):
snake_case_ = int((cipher[i] - (key[i]) ** 2) / key[i] )
plain.append(chr(_UpperCamelCase ) )
return "".join(_UpperCamelCase )
if __name__ == "__main__":
lowerCAmelCase_ , lowerCAmelCase_ = Onepad().encrypt('''Hello''')
print(c, k)
print(Onepad().decrypt(c, k))
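# Editor's note: the round-trip property of the cipher above, restated without
# the class wrapper. Each code point p is encrypted with a random key k as
# c = (p + k) * k, so decryption is exact integer arithmetic: p = (c - k*k) // k.
import random

def encrypt(text):
    pairs = [(p, random.randint(1, 300)) for p in map(ord, text)]
    return [(p + k) * k for p, k in pairs], [k for _, k in pairs]

def decrypt(cipher, key):
    return "".join(chr((c - k * k) // k) for c, k in zip(cipher, key))

cipher, key = encrypt("Hello")
assert decrypt(cipher, key) == "Hello"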
| 8
|
'''simple docstring'''
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
a : Optional[Any] = logging.get_logger(__name__)
a : Tuple = "T5Config"
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Any = jnp.zeros_like(__magic_name__ )
UpperCAmelCase : Optional[int] = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
UpperCAmelCase : str = shifted_input_ids.at[:, 0].set(__magic_name__ )
UpperCAmelCase : Any = jnp.where(shifted_input_ids == -100 , __magic_name__ , __magic_name__ )
return shifted_input_ids
class UpperCamelCase__ ( lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = "mt5"
SCREAMING_SNAKE_CASE__ : Dict = MTaConfig
class UpperCamelCase__ ( lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = "mt5"
SCREAMING_SNAKE_CASE__ : str = MTaConfig
class UpperCamelCase__ ( lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = "mt5"
SCREAMING_SNAKE_CASE__ : str = MTaConfig
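# Editor's note: a plain-numpy restatement of the jax shift_tokens_right helper
# above (pad_token_id=1 and decoder_start_token_id=0 are example values): the
# decoder input is the label sequence shifted right by one, with any remaining
# label-ignore positions (-100) replaced by the pad token.
import numpy as np

def shift_tokens_right_demo(input_ids, pad_token_id, decoder_start_token_id):
    shifted = np.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1]
    shifted[:, 0] = decoder_start_token_id
    return np.where(shifted == -100, pad_token_id, shifted)

labels = np.array([[5, 6, -100, -100]])
assert shift_tokens_right_demo(labels, 1, 0).tolist() == [[0, 5, 6, 1]]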
| 311
| 0
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase : str =logging.get_logger(__name__)
__lowerCAmelCase : Optional[int] ={'vocab_file': 'sentencepiece.bpe.model'}
__lowerCAmelCase : Dict ={
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
}
__lowerCAmelCase : List[str] ={
'moussaKam/mbarthez': 1_0_2_4,
'moussaKam/barthez': 1_0_2_4,
'moussaKam/barthez-orangesum-title': 1_0_2_4,
}
__lowerCAmelCase : Any ='▁'
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ : Tuple = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ : List[str] = ['''input_ids''', '''attention_mask''']
def __init__( self :Tuple , lowerCAmelCase__ :int , lowerCAmelCase__ :List[str]="<s>" , lowerCAmelCase__ :Optional[int]="</s>" , lowerCAmelCase__ :List[Any]="</s>" , lowerCAmelCase__ :List[str]="<s>" , lowerCAmelCase__ :str="<unk>" , lowerCAmelCase__ :List[str]="<pad>" , lowerCAmelCase__ :str="<mask>" , lowerCAmelCase__ :Optional[Dict[str, Any]] = None , **lowerCAmelCase__ :Optional[int] , ) -> None:
# Mask token behaves like a normal word, i.e. includes the space before it
__SCREAMING_SNAKE_CASE : Dict = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else mask_token
__SCREAMING_SNAKE_CASE : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase__ , )
__SCREAMING_SNAKE_CASE : Optional[Any] = vocab_file
__SCREAMING_SNAKE_CASE : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCAmelCase__ ) )
__SCREAMING_SNAKE_CASE : List[str] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
__SCREAMING_SNAKE_CASE : str = len(self.sp_model ) - 1
__SCREAMING_SNAKE_CASE : List[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __magic_name__( self :int , lowerCAmelCase__ :List[int] , lowerCAmelCase__ :Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__SCREAMING_SNAKE_CASE : str = [self.cls_token_id]
__SCREAMING_SNAKE_CASE : Optional[int] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __magic_name__( self :List[str] , lowerCAmelCase__ :List[int] , lowerCAmelCase__ :Optional[List[int]] = None , lowerCAmelCase__ :bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase__ )) + [1]
return [1] + ([0] * len(lowerCAmelCase__ )) + [1, 1] + ([0] * len(lowerCAmelCase__ )) + [1]
def __magic_name__( self :Tuple , lowerCAmelCase__ :List[int] , lowerCAmelCase__ :Optional[List[int]] = None ) -> List[int]:
__SCREAMING_SNAKE_CASE : List[str] = [self.sep_token_id]
__SCREAMING_SNAKE_CASE : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __magic_name__( self :int ) -> Optional[int]:
return len(self.sp_model )
def __magic_name__( self :Dict ) -> Any:
__SCREAMING_SNAKE_CASE : Optional[Any] = {self.convert_ids_to_tokens(lowerCAmelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __magic_name__( self :Any , lowerCAmelCase__ :str ) -> List[str]:
return self.sp_model.encode(lowerCAmelCase__ , out_type=lowerCAmelCase__ )
def __magic_name__( self :Union[str, Any] , lowerCAmelCase__ :Optional[Any] ) -> List[Any]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.sp_model.PieceToId(lowerCAmelCase__ )
return spm_id if spm_id else self.unk_token_id
def __magic_name__( self :str , lowerCAmelCase__ :Any ) -> List[Any]:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(lowerCAmelCase__ )
def __magic_name__( self :Any , lowerCAmelCase__ :Tuple ) -> Tuple:
__SCREAMING_SNAKE_CASE : int = []
__SCREAMING_SNAKE_CASE : Any = ''''''
__SCREAMING_SNAKE_CASE : List[str] = False
for token in tokens:
# make sure that special tokens are not decoded using the sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowerCAmelCase__ ) + token
__SCREAMING_SNAKE_CASE : Any = True
__SCREAMING_SNAKE_CASE : str = []
else:
current_sub_tokens.append(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = False
out_string += self.sp_model.decode(lowerCAmelCase__ )
return out_string.strip()
def __getstate__( self :str ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : int = self.__dict__.copy()
__SCREAMING_SNAKE_CASE : Optional[int] = None
return state
def __setstate__( self :int , lowerCAmelCase__ :Optional[int] ) -> str:
__SCREAMING_SNAKE_CASE : Optional[int] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = {}
__SCREAMING_SNAKE_CASE : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __magic_name__( self :Any , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(
lowerCAmelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCAmelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase__ , '''wb''' ) as fi:
__SCREAMING_SNAKE_CASE : Tuple = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase__ )
return (out_vocab_file,)
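# Editor's note: what build_inputs_with_special_tokens above produces for a
# BARThez/RoBERTa-style pair, using the <s>=0 and </s>=2 ids from the fairseq
# mapping above: single sequences become "<s> A </s>", pairs become
# "<s> A </s></s> B </s>".
cls_id, sep_id = 0, 2
a, b = [10, 11], [20]
assert [cls_id] + a + [sep_id] == [0, 10, 11, 2]
assert [cls_id] + a + [sep_id, sep_id] + b + [sep_id] == [0, 10, 11, 2, 2, 20, 2]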
| 9
|
'''simple docstring'''
from jiwer import compute_measures
import datasets
a : List[Any] = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
a : str = "\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n"
a : Union[str, Any] = "\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> wer = datasets.load_metric(\"wer\")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase__ ( datasets.Metric ):
"""simple docstring"""
def A_ ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[
"https://en.wikipedia.org/wiki/Word_error_rate",
] , )
def A_ ( self , snake_case=None , snake_case=None , snake_case=False ):
'''simple docstring'''
if concatenate_texts:
return compute_measures(snake_case , snake_case )["wer"]
else:
UpperCAmelCase : Dict = 0
UpperCAmelCase : Optional[Any] = 0
for prediction, reference in zip(snake_case , snake_case ):
UpperCAmelCase : Tuple = compute_measures(snake_case , snake_case )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
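# Editor's note: a dependency-free sketch of the WER = (S + D + I) / N formula
# documented above, via word-level Levenshtein distance instead of jiwer. On
# the docstring example, the two pairs contribute 1/4 and 3/4, i.e. 0.5 overall.
def word_error_rate(reference: str, hypothesis: str) -> float:
    ref, hyp = reference.split(), hypothesis.split()
    # dp[i][j] = edit distance between ref[:i] and hyp[:j]
    dp = [[0] * (len(hyp) + 1) for _ in range(len(ref) + 1)]
    for i in range(len(ref) + 1):
        dp[i][0] = i
    for j in range(len(hyp) + 1):
        dp[0][j] = j
    for i in range(1, len(ref) + 1):
        for j in range(1, len(hyp) + 1):
            cost = 0 if ref[i - 1] == hyp[j - 1] else 1
            dp[i][j] = min(dp[i - 1][j] + 1, dp[i][j - 1] + 1, dp[i - 1][j - 1] + cost)
    return dp[len(ref)][len(hyp)] / len(ref)

assert word_error_rate("this is the reference", "this is the prediction") == 0.25
assert word_error_rate("there is another one", "there is an other sample") == 0.75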
| 311
| 0
|
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
__A = ["gpt2"]
__A = "gpt2"
if is_tf_available():
class _SCREAMING_SNAKE_CASE ( tf.Module ):
'''simple docstring'''
def __init__(self : List[str] , UpperCAmelCase_ : List[Any]) ->Optional[int]:
'''simple docstring'''
super().__init__()
lowerCamelCase__: Optional[Any] =tokenizer
lowerCamelCase__: List[Any] =AutoConfig.from_pretrained(UpperCAmelCase_)
lowerCamelCase__: Tuple =TFGPTaLMHeadModel.from_config(UpperCAmelCase_)
@tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name="text"),))
def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : str) ->Any:
'''simple docstring'''
lowerCamelCase__: Optional[int] =self.tokenizer(UpperCAmelCase_)
lowerCamelCase__: str =tokenized["input_ids"].to_tensor()
lowerCamelCase__: Union[str, Any] =tf.cast(input_ids_dense > 0 , tf.intaa)
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
lowerCamelCase__: Union[str, Any] =self.model(input_ids=UpperCAmelCase_ , attention_mask=UpperCAmelCase_)["logits"]
return outputs
@require_tf
@require_keras_nlp
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ (self : str) ->int:
'''simple docstring'''
super().setUp()
lowerCamelCase__: Tuple =[GPTaTokenizer.from_pretrained(UpperCAmelCase_) for checkpoint in (TOKENIZER_CHECKPOINTS)]
lowerCamelCase__: Union[str, Any] =[TFGPTaTokenizer.from_pretrained(UpperCAmelCase_) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers) == len(self.tf_tokenizers)
lowerCamelCase__: Union[str, Any] =[
"This is a straightforward English test sentence.",
"This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
"Now we're going to add some Chinese: 一 二 三 一二三",
"And some much more rare Chinese: 齉 堃 齉堃",
"Je vais aussi écrire en français pour tester les accents",
"Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
]
lowerCamelCase__: Optional[Any] =list(zip(self.test_sentences , self.test_sentences[::-1]))
def SCREAMING_SNAKE_CASE_ (self : int) ->List[Any]:
'''simple docstring'''
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers):
for test_inputs in self.test_sentences:
lowerCamelCase__: Any =tokenizer([test_inputs] , return_tensors="tf")
lowerCamelCase__: List[Any] =tf_tokenizer([test_inputs])
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
lowerCamelCase__: Tuple =python_outputs[key].numpy()
lowerCamelCase__: Dict =tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
self.assertTrue(tf.reduce_all(tf.cast(UpperCAmelCase_ , tf.intaa) == tf_outputs_values))
@slow
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->int:
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
lowerCamelCase__: Union[str, Any] =tf.function(UpperCAmelCase_)
for test_inputs in self.test_sentences:
lowerCamelCase__: List[str] =tf.constant(UpperCAmelCase_)
lowerCamelCase__: Any =compiled_tokenizer(UpperCAmelCase_)
lowerCamelCase__: Optional[int] =tf_tokenizer(UpperCAmelCase_)
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))
@slow
def SCREAMING_SNAKE_CASE_ (self : str) ->Optional[Any]:
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
lowerCamelCase__: Any =ModelToSave(tokenizer=UpperCAmelCase_)
lowerCamelCase__: List[Any] =tf.convert_to_tensor([self.test_sentences[0]])
lowerCamelCase__: Union[str, Any] =model.serving(UpperCAmelCase_) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
lowerCamelCase__: Optional[int] =Path(UpperCAmelCase_) / "saved.model"
tf.saved_model.save(UpperCAmelCase_ , UpperCAmelCase_ , signatures={"serving_default": model.serving})
lowerCamelCase__: str =tf.saved_model.load(UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =loaded_model.signatures["serving_default"](UpperCAmelCase_)["output_0"]
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output))
@slow
def SCREAMING_SNAKE_CASE_ (self : int) ->Optional[Any]:
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
lowerCamelCase__: int =tf.convert_to_tensor([self.test_sentences[0]])
lowerCamelCase__: Optional[int] =tf_tokenizer(UpperCAmelCase_) # Build model with some sample inputs
lowerCamelCase__: Union[str, Any] =tf_tokenizer.get_config()
lowerCamelCase__: str =TFGPTaTokenizer.from_config(UpperCAmelCase_)
lowerCamelCase__: Any =model_from_config(UpperCAmelCase_)
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))
@slow
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Optional[Any]:
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
lowerCamelCase__: List[Any] =123_123
for max_length in [3, 5, 1_024]:
lowerCamelCase__: Tuple =tf.convert_to_tensor([self.test_sentences[0]])
lowerCamelCase__: List[str] =tf_tokenizer(UpperCAmelCase_ , max_length=UpperCAmelCase_)
lowerCamelCase__: List[Any] =out["input_ids"].numpy().shape[1]
assert out_length == max_length
| 10
|
'''simple docstring'''
from functools import lru_cache
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = 2
UpperCAmelCase : str = set()
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.add(__magic_name__ )
if n > 1:
factors.add(__magic_name__ )
return factors
@lru_cache
def lowercase ( __magic_name__ ):
'''simple docstring'''
return len(unique_prime_factors(__magic_name__ ) )
def lowercase ( __magic_name__ ):
'''simple docstring'''
return len(set(__magic_name__ ) ) in (0, 1)
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Dict = 2
while True:
# Build a window of consecutive integers starting at the current base
UpperCAmelCase : Any = [base + i for i in range(__magic_name__ )]
# Run elements through our unique_prime_factors function.
# Append our target number to the end.
UpperCAmelCase : Dict = [upf_len(__magic_name__ ) for x in group]
checker.append(__magic_name__ )
# If all numbers in the list are equal, return the group variable.
if equality(__magic_name__ ):
return group
# Increment our base variable by 1
base += 1
def lowercase ( __magic_name__ = 4 ):
'''simple docstring'''
UpperCAmelCase : int = run(__magic_name__ )
return results[0] if len(__magic_name__ ) else None
if __name__ == "__main__":
print(solution())
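# Editor's note: a compact cross-check of the search above; the first such pair
# and triple of consecutive integers are stated in Project Euler problem 47.
def distinct_prime_factor_count(n: int) -> int:
    count, d = 0, 2
    while d * d <= n:
        if n % d == 0:
            count += 1
            while n % d == 0:
                n //= d
        d += 1
    return count + (1 if n > 1 else 0)

def first_consecutive(run_length: int) -> int:
    n = 2
    while any(distinct_prime_factor_count(n + i) != run_length for i in range(run_length)):
        n += 1
    return n

assert first_consecutive(2) == 14    # 14 = 2 * 7 and 15 = 3 * 5
assert first_consecutive(3) == 644   # 644 = 2^2 * 7 * 23, 645 = 3 * 5 * 43, 646 = 2 * 17 * 19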
| 311
| 0
|
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {'vocab_file': 'spiece.model'}
lowerCAmelCase__ = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
}
}
lowerCAmelCase__ = {
'albert-base-v1': 5_12,
'albert-large-v1': 5_12,
'albert-xlarge-v1': 5_12,
'albert-xxlarge-v1': 5_12,
'albert-base-v2': 5_12,
'albert-large-v2': 5_12,
'albert-xlarge-v2': 5_12,
'albert-xxlarge-v2': 5_12,
}
lowerCAmelCase__ = '▁'
class lowerCAmelCase__ ( a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , __lowerCamelCase , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=False , __lowerCamelCase="[CLS]" , __lowerCamelCase="[SEP]" , __lowerCamelCase="<unk>" , __lowerCamelCase="[SEP]" , __lowerCamelCase="<pad>" , __lowerCamelCase="[CLS]" , __lowerCamelCase="[MASK]" , __lowerCamelCase = None , **__lowerCamelCase , ) -> None:
# Mask token behaves like a normal word, i.e. includes the space before it and
# is included in the raw text; there should be a match in a non-normalized sentence.
_A : Tuple = (
AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase , normalized=__lowerCamelCase)
if isinstance(__lowerCamelCase , __lowerCamelCase)
else mask_token
)
_A : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__lowerCamelCase , remove_space=__lowerCamelCase , keep_accents=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , pad_token=__lowerCamelCase , cls_token=__lowerCamelCase , mask_token=__lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCamelCase , )
_A : Union[str, Any] = do_lower_case
_A : List[str] = remove_space
_A : str = keep_accents
_A : List[str] = vocab_file
_A : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(__lowerCamelCase)
@property
def _lowerCamelCase ( self) -> Tuple:
return len(self.sp_model)
def _lowerCamelCase ( self) -> List[Any]:
_A : Optional[Any] = {self.convert_ids_to_tokens(__lowerCamelCase): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self) -> Union[str, Any]:
_A : int = self.__dict__.copy()
_A : Optional[int] = None
return state
def __setstate__( self , __lowerCamelCase) -> int:
_A : List[Any] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs"):
_A : Tuple = {}
_A : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def _lowerCamelCase ( self , __lowerCamelCase) -> int:
if self.remove_space:
_A : Optional[Any] = " ".join(inputs.strip().split())
else:
_A : List[str] = inputs
_A : int = outputs.replace("``" , "\"").replace("''" , "\"")
if not self.keep_accents:
_A : Dict = unicodedata.normalize("NFKD" , __lowerCamelCase)
_A : Union[str, Any] = "".join([c for c in outputs if not unicodedata.combining(__lowerCamelCase)])
if self.do_lower_case:
_A : Optional[Any] = outputs.lower()
return outputs
def _lowerCamelCase ( self , __lowerCamelCase) -> List[str]:
_A : Union[str, Any] = self.preprocess_text(__lowerCamelCase)
_A : int = self.sp_model.encode(__lowerCamelCase , out_type=__lowerCamelCase)
_A : Any = []
for piece in pieces:
if len(__lowerCamelCase) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
_A : Optional[Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(__lowerCamelCase , ""))
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0]) == 1:
_A : List[str] = cur_pieces[1:]
else:
_A : Tuple = cur_pieces[0][1:]
cur_pieces.append(piece[-1])
new_pieces.extend(__lowerCamelCase)
else:
new_pieces.append(__lowerCamelCase)
return new_pieces
def _lowerCamelCase ( self , __lowerCamelCase) -> Any:
return self.sp_model.PieceToId(__lowerCamelCase)
def _lowerCamelCase ( self , __lowerCamelCase) -> Dict:
return self.sp_model.IdToPiece(__lowerCamelCase)
def _lowerCamelCase ( self , __lowerCamelCase) -> Tuple:
_A : int = []
_A : str = ""
_A : List[Any] = False
for token in tokens:
# make sure that special tokens are not decoded using the sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__lowerCamelCase) + token
_A : Any = True
_A : List[str] = []
else:
current_sub_tokens.append(__lowerCamelCase)
_A : Union[str, Any] = False
out_string += self.sp_model.decode(__lowerCamelCase)
return out_string.strip()
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]:
_A : List[str] = [self.sep_token_id]
_A : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None , __lowerCamelCase = False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase)
if token_ids_a is not None:
return [1] + ([0] * len(__lowerCamelCase)) + [1] + ([0] * len(__lowerCamelCase)) + [1]
return [1] + ([0] * len(__lowerCamelCase)) + [1]
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]:
_A : List[str] = [self.sep_token_id]
_A : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> Tuple[str]:
if not os.path.isdir(__lowerCamelCase):
logger.error(F"Vocabulary path ({save_directory}) should be a directory")
return
_A : int = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(__lowerCamelCase) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , __lowerCamelCase)
elif not os.path.isfile(self.vocab_file):
with open(__lowerCamelCase , "wb") as fi:
_A : List[str] = self.sp_model.serialized_model_proto()
fi.write(__lowerCamelCase)
return (out_vocab_file,)
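# Editor's note: the segment ids that create_token_type_ids_from_sequences
# above yields for ALBERT-style inputs: 0 over "[CLS] A [SEP]" and 1 over
# "B [SEP]".
a, b = [10, 11], [20, 21, 22]
assert [0] * (len(a) + 2) + [1] * (len(b) + 1) == [0, 0, 0, 0, 1, 1, 1, 1]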
| 11
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
a : Union[str, Any] = {
"configuration_encodec": [
"ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EncodecConfig",
],
"feature_extraction_encodec": ["EncodecFeatureExtractor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Optional[int] = [
"ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
"EncodecModel",
"EncodecPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
a : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
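# Editor's note: the pattern above defers heavy imports until an attribute is
# first accessed. A minimal sketch of the same idea with importlib (the real
# _LazyModule in transformers additionally handles submodules, type checking,
# and error reporting):
import importlib
import types

class LazyModuleDemo(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._class_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):  # only called when `attr` is not found normally
        module = importlib.import_module(self._class_to_module[attr])
        return getattr(module, attr)

shims = LazyModuleDemo("shims", {"math": ["sqrt"], "json": ["dumps"]})
assert shims.sqrt(4) == 2.0 and shims.dumps([1]) == "[1]"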
| 311
| 0
|
def lowerCamelCase__ ( A__ : str , A__ : str ):
'''simple docstring'''
assert x is not None
assert y is not None
__lowerCamelCase = len(A__ )
__lowerCamelCase = len(A__ )
# declaring the array for storing the dp values
__lowerCamelCase = [[0] * (n + 1) for _ in range(m + 1 )] # noqa: E741
for i in range(1 , m + 1 ):
for j in range(1 , n + 1 ):
__lowerCamelCase = 1 if x[i - 1] == y[j - 1] else 0
__lowerCamelCase = max(l[i - 1][j] , l[i][j - 1] , l[i - 1][j - 1] + match )
__lowerCamelCase = """"""
__lowerCamelCase, __lowerCamelCase = m, n
while i > 0 and j > 0:
__lowerCamelCase = 1 if x[i - 1] == y[j - 1] else 0
if l[i][j] == l[i - 1][j - 1] + match:
if match == 1:
__lowerCamelCase = x[i - 1] + seq
i -= 1
j -= 1
elif l[i][j] == l[i - 1][j]:
i -= 1
else:
j -= 1
return l[m][n], seq
if __name__ == "__main__":
UpperCAmelCase_ = 'AGGTAB'
UpperCAmelCase_ = 'GXTXAYB'
UpperCAmelCase_ = 4
UpperCAmelCase_ = 'GTAB'
UpperCAmelCase_ , UpperCAmelCase_ = longest_common_subsequence(a, b)
print('len =', ln, ', sub-sequence =', subseq)
import doctest
doctest.testmod()
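# Editor's note: the same bottom-up LCS recurrence as above in a condensed,
# string-valued form, with an empty-string edge case and a classic textbook pair.
def lcs_demo(x: str, y: str) -> str:
    dp = [[""] * (len(y) + 1) for _ in range(len(x) + 1)]
    for i in range(1, len(x) + 1):
        for j in range(1, len(y) + 1):
            if x[i - 1] == y[j - 1]:
                dp[i][j] = dp[i - 1][j - 1] + x[i - 1]
            else:
                dp[i][j] = max(dp[i - 1][j], dp[i][j - 1], key=len)
    return dp[len(x)][len(y)]

assert lcs_demo("", "abc") == ""
assert lcs_demo("abcde", "ace") == "ace"
assert lcs_demo("AGGTAB", "GXTXAYB") == "GTAB"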
| 12
|
'''simple docstring'''
# Lint as: python3
import itertools
import os
import re
a : Tuple = re.compile(R"([A-Z]+)([A-Z][a-z])")
a : Union[str, Any] = re.compile(R"([a-z\d])([A-Z])")
a : str = re.compile(R"(?<!_)_(?!_)")
a : List[Any] = re.compile(R"(_{2,})")
a : List[Any] = R"^\w+(\.\w+)*$"
a : Dict = R"<>:/\|?*"
def camelcase_to_snakecase(name):
    """Convert camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    """Convert snake-case string to camel-case string."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
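# Usage sketch (expected behaviour of the helpers above; names are illustrative):
#
#     camelcase_to_snakecase("SquadV2")              # -> "squad_v2"
#     snakecase_to_camelcase("squad_v2")             # -> "SquadV2"
#     filename_prefix_for_split("SquadV2", "train")  # -> "squad_v2-train"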
| 311
| 0
|
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""speechbrain/m-ctc-t-large""": """https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json""",
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class MCTCTConfig(PretrainedConfig):
    """Configuration class to store the configuration of an M-CTC-T model."""

    model_type = "mctct"

    def __init__(
        self,
        vocab_size=8065,
        hidden_size=1536,
        num_hidden_layers=36,
        intermediate_size=6144,
        num_attention_heads=4,
        attention_head_dim=384,
        max_position_embeddings=920,
        layer_norm_eps=1e-5,
        layerdrop=0.3,
        hidden_act="relu",
        initializer_range=0.02,
        hidden_dropout_prob=0.3,
        attention_probs_dropout_prob=0.3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        conv_glu_dim=1,
        conv_dropout=0.3,
        num_conv_layers=1,
        conv_kernel=(7,),
        conv_stride=(3,),
        input_feat_per_channel=80,
        input_channels=1,
        conv_channels=None,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # prevents config testing fail with exporting to json
        self.conv_kernel = list(conv_kernel)
        self.conv_stride = list(conv_stride)

        if len(self.conv_kernel) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )
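# Usage sketch (a minimal example; the arguments shown are the defaults above):
#
#     config = MCTCTConfig(num_conv_layers=1, conv_kernel=(7,))
#     assert len(config.conv_kernel) == config.num_conv_layers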
| 13
|
'''simple docstring'''
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
B"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b"H\003"
    # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
| 311
| 0
|
import glob
import os
import random
from string import ascii_lowercase, digits

import cv2
import numpy as np

# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
NUMBER_IMAGES = 250
def main() -> None:
    """
    Get images list and annotations list from input dir.
    Update new images and annotations.
    Save images and annotations in output dir.
    """
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths,
            annos,
            idxs,
            OUTPUT_SIZE,
            SCALE_RANGE,
            filter_scale=FILTER_TINY_SCALE,
        )

        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", new_image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}")
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    """Read YOLO-style label files and return image paths with their boxes."""
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2

            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(
    all_img_list: list,
    all_annos: list,
    idxs: list,
    output_size: tuple,
    scale_range: tuple,
    filter_scale: float = 0.0,
) -> tuple[list, list, str]:
    """Combine 4 images and their annotations into one mosaic image."""
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])

    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cv2.imread(path)
        if i == 0:  # top-left
            img = cv2.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cv2.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y)
            )
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])

    # Remove bounding box small than scale of filter
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]

    return output_img, new_anno, path_list[0]
def random_chars(number_char: int) -> str:
    """Automatically generate a random string of the given length."""
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print("""DONE ✅""")
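# Input label files are expected in the (assumed) YOLO format parsed by
# get_dataset: one object per line, coordinates normalised to [0, 1]:
#
#     <class_id> <x_center> <y_center> <width> <height>
#     e.g. "0 0.5 0.5 0.25 0.25" is a class-0 box centred in the image.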
| 14
|
'''simple docstring'''
import argparse
import copy
def generate_neighbours(path):
    """Parse the input file into a dict mapping each node to its [neighbour, distance] pairs."""
    dict_of_neighbours = {}

    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]]
                )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]]
                )

    return dict_of_neighbours
def generate_first_solution(path, dict_of_neighbours):
    """Greedily build an initial tour starting from the first node in the file."""
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []
    visiting = start_node

    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood(solution, dict_of_neighbours):
    """Return all neighbours of `solution` obtained by swapping two inner nodes,
    each candidate with its total distance appended as the last element."""
    neighborhood_of_solution = []

    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1

    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution
def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    """Run tabu search for `iters` iterations with a tabu list of length `size`."""
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost
def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)

    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )

    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )

    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Tabu Search")
parser.add_argument(
"-f",
"--File",
type=str,
help="Path to the file containing the data",
required=True,
)
parser.add_argument(
"-i",
"--Iterations",
type=int,
help="How many iterations the algorithm should perform",
required=True,
)
parser.add_argument(
"-s", "--Size", type=int, help="Size of the tabu list", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
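# Expected input file format (a sketch inferred from generate_neighbours: one
# undirected edge per line as "<node> <node> <distance>"):
#
#     a b 20
#     a c 18
#     b c 10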
| 311
| 0
|
def binary_exponentiation(a: int, n: int, mod: int) -> int:
    """Compute (a ** n) % mod in O(log n) multiplications."""
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod


# a prime number
p = 701

a = 1_000_000_000
b = 10

# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)

# using Python operators:
print((a / b) % p == (a * b ** (p - 2)) % p)
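# Why this works (Fermat's little theorem): for a prime p and b not divisible
# by p, b**(p - 2) % p is the modular inverse of b, so dividing by b modulo p
# is the same as multiplying by binary_exponentiation(b, p - 2, p). A quick
# cross-check against the built-in:
#
#     assert binary_exponentiation(10, 699, 701) == pow(10, 699, 701)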
| 15
|
'''simple docstring'''
from collections.abc import Generator
from math import sin
def to_little_endian(string_32: bytes) -> bytes:
    """Convert a string of 32 characters to little-endian (reversed byte order)."""
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian
def reformat_hex(i: int) -> bytes:
    """Convert a non-negative integer to little-endian hexadecimal representation."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex
def preprocess(message: bytes) -> bytes:
    """Pad the message (as a bit string) to a multiple of 512 and append its length."""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string
def get_block_words(bit_string: bytes) -> Generator:
    """Split the padded bit string into 512-bit blocks of sixteen 32-bit words."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words
def not_32(i: int) -> int:
    """Perform bitwise NOT on a 32-bit integer."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)
def sum_32(a: int, b: int) -> int:
    """Add two 32-bit integers, wrapping around 2**32."""
    return (a + b) % 2**32
def left_rotate_32(i: int, shift: int) -> int:
    """Rotate the bits of a 32-bit integer left by `shift` places."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me(message: bytes) -> bytes:
    """Return the 32-character MD5 hash digest of the given message."""
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476
    shift_amounts = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
# Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)     # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)     # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
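# Optional cross-check against the standard library (a sketch; assumes the
# implementation above follows RFC 1321, which hashlib's md5 implements):
#
#     import hashlib
#     assert md5_me(b"hello") == hashlib.md5(b"hello").hexdigest().encode("utf-8")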
| 311
| 0
|
"""simple docstring"""
from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint(ABC):
    r"""Abstract base class for all constraints that can be applied during generation."""

    def __init__(self):
        # test for the above condition
        self.test()

    def test(self):
        """Tests whether this constraint has been properly defined."""
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()
            advance = self.advance()
            if not self.does_advance(advance):
                raise Exception(
                    "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true."
                )

            stepped, completed, reset = self.update(advance)
            counter += 1

            if counter > 10_000:
                raise Exception("update() does not fulfill the constraint.")

        if self.remaining() != 0:
            raise Exception("Custom Constraint is not defined correctly.")

    @abstractmethod
    def advance(self):
        """Returns the token(s) that would advance this constraint one step."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def does_advance(self, token_id: int):
        """Reads in a token and returns whether it creates progress."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def update(self, token_id: int):
        """Reads in a token and returns `(stepped, completed, reset)`."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def reset(self):
        """Resets the state of this constraint to its initialization."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def remaining(self):
        """Returns the number of remaining steps needed to complete this constraint."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def copy(self, stateful=False):
        """Creates a new instance of this constraint; if `stateful`, progress is copied too."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )
class PhrasalConstraint(Constraint):
    r"""[`Constraint`] enforcing that an ordered sequence of tokens is included in the output."""

    def __init__(self, token_ids: List[int]):
        super(Constraint, self).__init__()

        if not isinstance(token_ids, list) or len(token_ids) == 0:
            raise ValueError(f"`token_ids` has to be a non-empty list, but is {token_ids}.")
        if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
            raise ValueError(f"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.")

        self.token_ids = token_ids
        self.seqlen = len(self.token_ids)
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False

    def advance(self):
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")
        if self.completed:
            return False
        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.fulfilled_idx = 0

    def remaining(self):
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy(self, stateful=False):
        new_constraint = PhrasalConstraint(self.token_ids)

        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed

        return new_constraint
class DisjunctiveTrie:
    """A trie over several token-id sequences, used by `DisjunctiveConstraint`."""

    def __init__(self, nested_token_ids: List[List[int]], no_subsets=True):
        self.max_height = max([len(one) for one in nested_token_ids])

        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]

        if no_subsets and self.has_subsets(root, nested_token_ids):
            raise ValueError(
                "Each list in `nested_token_ids` can't be a complete subset of another list, but is"
                f" {nested_token_ids}."
            )

        self.trie = root

    def next_tokens(self, current_seq):
        """The next possible tokens that will progress the trie, given the current sequence of tokens."""
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]
        next_tokens = list(start.keys())
        return next_tokens

    def reached_leaf(self, current_seq):
        next_tokens = self.next_tokens(current_seq)
        return len(next_tokens) == 0

    def count_leaves(self, root):
        next_nodes = list(root.values())
        if len(next_nodes) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn) for nn in next_nodes])

    def has_subsets(self, trie, nested_token_ids):
        """Returns whether # of leaves == # of words. Otherwise some word is a subset of another."""
        leaf_count = self.count_leaves(trie)
        return len(nested_token_ids) != leaf_count
class DisjunctiveConstraint(Constraint):
    r"""A special [`Constraint`] that is fulfilled by fulfilling just one of several constraints."""

    def __init__(self, nested_token_ids: List[List[int]]):
        super(Constraint, self).__init__()

        if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
            raise ValueError(f"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.")
        if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
            raise ValueError(f"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.")
        if any(
            any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
            for token_ids in nested_token_ids
        ):
            raise ValueError(
                f"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}."
            )

        self.trie = DisjunctiveTrie(nested_token_ids)
        self.token_ids = nested_token_ids

        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False

    def advance(self):
        token_list = self.trie.next_tokens(self.current_seq)

        if len(token_list) == 0:
            return None
        else:
            return token_list

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")

        next_tokens = self.trie.next_tokens(self.current_seq)

        return token_id in next_tokens

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.current_seq.append(token_id)
            stepped = True
        else:
            reset = True
            self.reset()

        completed = self.trie.reached_leaf(self.current_seq)
        self.completed = completed

        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.current_seq = []

    def remaining(self):
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq)

    def copy(self, stateful=False):
        new_constraint = DisjunctiveConstraint(self.token_ids)

        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed

        return new_constraint
class ConstraintListState:
    r"""A class for beam scorers to track their progress through a list of constraints."""

    def __init__(self, constraints: List[Constraint]):
        self.constraints = constraints

        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints])
        self.n_constraints = len(constraints)
        self.completed = False

        self.init_state()

    def init_state(self):
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]

    def get_bank(self):
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()

        return (len(self.complete_constraints) * self.max_seqlen) + add

    def advance(self):
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance, int):
                    token_list.append(advance)
                elif isinstance(advance, list):
                    token_list.extend(advance)
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance, int):
                token_list.append(advance)
            elif isinstance(advance, list):
                token_list.extend(advance)

        if len(token_list) == 0:
            return None
        else:
            return token_list

    def reset(self, token_ids: Optional[List[int]]):
        self.init_state()

        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete, stepped = self.add(token)

                # the entire list of constraints are fulfilled
                if self.completed:
                    break

    def add(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` should be an `int`, but is `{token_id}`.")

        complete, stepped = False, False

        if self.completed:
            complete = True
            stepped = False
            return complete, stepped

        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current
            # job, simply update the state
            stepped, complete, reset = self.inprogress_constraint.update(token_id)
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #     e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".

                #     But that doesn't mean we self.init_state(), since we only reset the state for this particular
                #     constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
                self.inprogress_constraint = None

            if complete:
                # 2. If the next token completes the constraint, move it to completed list, set
                #     inprogress to None. If there are no pending constraints either, then this full list of constraints
                #     is complete.
                self.complete_constraints.append(self.inprogress_constraint)
                self.inprogress_constraint = None

                if len(self.pending_constraints) == 0:
                    # we're done!
                    self.completed = True

        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
            # of constraints?
            for cidx, pending_constraint in enumerate(self.pending_constraints):
                if pending_constraint.does_advance(token_id):
                    stepped, complete, reset = pending_constraint.update(token_id)

                    if not stepped:
                        raise Exception(
                            "`constraint.update(token_id)` is not yielding incremental progress, "
                            "even though `constraint.does_advance(token_id)` is true."
                        )

                    if complete:
                        self.complete_constraints.append(pending_constraint)
                        self.inprogress_constraint = None

                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint

                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )

                        if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            self.completed = True

                        break  # prevent accidentally stepping through multiple constraints with just one token.

        return complete, stepped

    def copy(self, stateful=True):
        new_state = ConstraintListState(self.constraints)  # we never modify self.constraints objects
        # during this process, so they stay at initialization state.

        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]

        return new_state
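# Minimal usage sketch (hypothetical token ids, mirroring how generation code
# drives a constraint):
#
#     constraint = PhrasalConstraint([5, 6, 7])
#     stepped, completed, reset = constraint.update(5)  # fulfils the first token
#     assert stepped and not completed and not reset
#     assert constraint.advance() == 6                  # next required token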
| 16
|
'''simple docstring'''
__version__ = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 311
| 0
|
"""simple docstring"""
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = '\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
_DESCRIPTION = '\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n        Estimated target values.\n    references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n        Ground truth (correct) target values.\n    sample_weight: array-like of shape (n_samples,), default=None\n        Sample weights.\n    multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"\n        Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n        "raw_values" : Returns a full set of errors in case of multioutput input.\n\n        "uniform_average" : Errors of all outputs are averaged with uniform weight.\n\n    squared : bool, default=True\n        If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n    mse : mean squared error.\nExamples:\n\n    >>> mse_metric = datasets.load_metric("mse")\n    >>> predictions = [2.5, 0.0, 2, 8]\n    >>> references = [3, -0.5, 2, 7]\n    >>> results = mse_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'mse\': 0.375}\n    >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n    >>> print(rmse_result)\n    {\'mse\': 0.6123724356957945}\n\n    If you\'re using multi-dimensional lists, then set the config as follows :\n\n    >>> mse_metric = datasets.load_metric("mse", "multilist")\n    >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n    >>> references = [[0, 2], [-1, 2], [8, -5]]\n    >>> results = mse_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'mse\': 0.7083333333333334}\n    >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')\n    >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n    {\'mse\': array([0.41666667, 1. ])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mse(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(self._get_feature_types() ), reference_urls=[
"https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
], )
    def _get_feature_types(self):
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("float" ) ),
"references": datasets.Sequence(datasets.Value("float" ) ),
}
else:
return {
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
}
    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
return {"mse": mse}
| 17
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_yolos_config(yolos_name: str) -> YolosConfig:
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]

    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def read_in_q_k_v(state_dict: dict, config: YolosConfig, base_model: bool = False):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(name: str) -> str:
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")

    return name
def convert_state_dict(orig_state_dict: dict, model: YolosForObjectDetection) -> dict:
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def prepare_img() -> Image.Image:
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_yolos_checkpoint(
    yolos_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False
):
    """Copy/paste/tweak the original model's weights into our YOLOS structure."""
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]]
        )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]]
        )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]]
        )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]]
        )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]]
        )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")

    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--yolos_name",
default="yolos_s_200_pre",
type=str,
help=(
"Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
" 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
),
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
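# Example invocation (a sketch; the script name and paths are placeholders):
#
#     python convert_yolos_to_pytorch.py \
#         --yolos_name yolos_s_200_pre \
#         --checkpoint_path /path/to/yolos_s_200_pre.pth \
#         --pytorch_dump_folder_path /path/to/output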
| 311
| 0
|
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    """`FeatureConnector` for translations with fixed languages per example."""

    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self):
        """Flatten the Translation feature into a dictionary."""
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}
@dataclass
class TranslationVariableLanguages:
    """`FeatureConnector` for translations with variable languages per example."""

    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f'Some languages in example ({", ".join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({", ".join(lang_set)}).'
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self):
        """Flatten the TranslationVariableLanguages feature into a dictionary."""
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
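# Usage sketch (hypothetical language codes):
#
#     feature = TranslationVariableLanguages(languages=["en", "fr", "de"])
#     feature.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"]})
#     # -> {"language": ("en", "fr", "fr"),
#     #     "translation": ("the cat", "la chatte", "le chat")}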
| 18
|
'''simple docstring'''
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)
def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
main()
| 311
| 0
|
def validate_initial_digits(credit_card_number: str) -> bool:
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))


def luhn_validation(credit_card_number: str) -> bool:
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9 (e.g., 6 x 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False
    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False
    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False
    print(f"{credit_card_number} is a valid credit card number.")
    return True
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    validate_credit_card_number("4111111111111111")
    validate_credit_card_number("32323")
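# Worked example (added for illustration; not in the original file): Luhn on
# "4111111111111111" doubles every second digit from the right, giving eight 2s/8s
# (2+2+2+2+2+2+2+8 = 22) plus eight untouched 1s (8), for a total of 30.
# Since 30 % 10 == 0, the number passes the Luhn check.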
| 19
|
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
    from transformers.models.gpt2 import TFGPT2Tokenizer
TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"
if is_tf_available():

    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPT2LMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])

            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]
            return outputs
@require_tf
@require_keras_nlp
class TFGPT2TokenizerTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()
                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 123123

            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)

                out_length = out["input_ids"].numpy().shape[1]

                assert out_length == max_length
| 311
| 0
|
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_trajectory_transformer": [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TrajectoryTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trajectory_transformer"] = [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrajectoryTransformerModel",
        "TrajectoryTransformerPreTrainedModel",
        "load_tf_weights_in_trajectory_transformer",
    ]

if TYPE_CHECKING:
    from .configuration_trajectory_transformer import (
        TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TrajectoryTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trajectory_transformer import (
            TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TrajectoryTransformerModel,
            TrajectoryTransformerPreTrainedModel,
            load_tf_weights_in_trajectory_transformer,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
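# Note (added for clarity; not part of the original module): this is the standard
# transformers lazy-import layout. During static type checking the names are imported
# eagerly; at runtime sys.modules[__name__] is replaced by a _LazyModule, so heavy
# submodules such as modeling_trajectory_transformer load only on first attribute access.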
| 20
|
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_model_doc_toc(model_doc):
    """
    Cleans the table of content of the model documentation by removing duplicates and sorting models alphabetically.
    """
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())


def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1

    model_doc = api_doc[model_idx]["sections"]

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)

        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_doc(args.fix_and_overwrite)
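# Illustrative example (added; not part of the original script): clean_model_doc_toc
# deduplicates entries sharing the same "local" key and sorts by title, e.g.
#   [{"local": "albert", "title": "ALBERT"}, {"local": "albert", "title": "ALBERT"},
#    {"local": "bert", "title": "BERT"}]
#   -> [{"local": "albert", "title": "ALBERT"}, {"local": "bert", "title": "BERT"}]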
| 311
| 0
|
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model) -> None:
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
    )
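# Example invocation (added for illustration; the script name and paths are assumptions,
# not from the original file):
#   python convert_funnel_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path model.ckpt --config_file config.json --pytorch_dump_path pytorch_model.bin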
| 21
|
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_module(module):
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_pil(image):
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
| 311
| 0
|
'''simple docstring'''
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """
    returns the list containing all the possible
    combinations a string(target) can be constructed from
    the given list of substrings(word_bank)
    """

    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1

    table: list = []
    for _ in range(table_size):
        table.append([])
    # seed value
    table[0] = [[]]  # because empty string has empty combination

    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [[word, *way] for way in table[i]]
                    # adds the word to every combination the current position holds
                    # now,push that combination to the table[i+len(word)]
                    table[i + len(word)] += new_combinations

    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()

    return table[len(target)]
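# Trace (added for illustration; not in the original file): all_construct("ab", ["a", "b", "ab"])
# fills table = [[[]], [["a"]], [["ab"], ["b", "a"]]]; after per-combination reversal the
# result is [["ab"], ["a", "b"]].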
if __name__ == "__main__":
    print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]))
    print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"]))
    print(
        all_construct(
            "hexagonosaurus",
            ["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"],
        )
    )
| 22
|
'''simple docstring'''
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import (
    Seq2SeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = getLogger(__name__)
def eval_data_dir(
    data_dir,
    save_dir: str,
    model_name: str,
    bs: int = 8,
    max_source_length: int = 1024,
    type_path="val",
    n_obs=None,
    fp16=False,
    task="summarization",
    local_rank=None,
    num_return_sequences=1,
    dataset_kwargs: Dict = None,
    prefix="",
    **generate_kwargs,
) -> Dict:
    """Evaluate a model on a dataset shard and save the results to save_dir/rank_{local_rank}_output.json"""
    model_name = str(model_name)
    assert local_rank is not None
    torch.distributed.init_process_group(backend="nccl", rank=local_rank)

    save_dir = Path(save_dir)
    save_path = save_dir.joinpath(f"rank_{local_rank}_output.json")
    torch.cuda.set_device(local_rank)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).cuda()
    if fp16:
        model = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(model, task)  # update config with task specific params
    num_beams = generate_kwargs.pop("num_beams", model.config.num_beams)  # AttributeError risk?
    if num_return_sequences > num_beams:
        num_beams = num_return_sequences
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    if max_source_length is None:
        max_source_length = tokenizer.model_max_length
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    ds = Seq2SeqDataset(
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length=1024,
        type_path=type_path,
        n_obs=n_obs,
        prefix=prefix,
        **dataset_kwargs,
    )
    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    sampler = ds.make_sortish_sampler(bs, distributed=True, add_extra_examples=False, shuffle=True)
    data_loader = DataLoader(ds, sampler=sampler, batch_size=bs, collate_fn=ds.collate_fn)
    results = []
    for batch in tqdm(data_loader):
        summaries = model.generate(
            input_ids=batch["input_ids"].to(model.device),
            attention_mask=batch["attention_mask"].to(model.device),
            num_return_sequences=num_return_sequences,
            num_beams=num_beams,
            **generate_kwargs,
        )
        preds = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        ids = batch["ids"]
        if num_return_sequences > 1:
            preds = chunks(preds, num_return_sequences)  # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(preds):
            results.append({"pred": pred, "id": ids[i].item()})
    save_json(results, save_path)
    return results, sampler.num_replicas
def run_generate():
    parser = argparse.ArgumentParser(
        epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate"
    )
    parser.add_argument("--data_dir", type=str, help="like cnn_dm/test.source")
    parser.add_argument(
        "--model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.", default="sshleifer/distilbart-xsum-12-3"
    )
    parser.add_argument("--save_dir", type=str, help="where to save", default="tmp_gen")
    parser.add_argument("--max_source_length", type=int, default=None)
    parser.add_argument(
        "--type_path", type=str, default="test", help="which subset to evaluate typically train/val/test"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--local_rank", type=int, default=-1, required=False, help="should be passed by distributed.launch"
    )
    parser.add_argument(
        "--n_obs", type=int, default=None, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument(
        "--num_return_sequences", type=int, default=1, required=False, help="How many sequences to return"
    )
    parser.add_argument(
        "--sync_timeout", type=int, default=600, required=False, help="How long should master process wait for other processes to finish."
    )
    parser.add_argument("--src_lang", type=str, default=None, required=False)
    parser.add_argument("--tgt_lang", type=str, default=None, required=False)
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the begininng of src examples"
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--debug", action="store_true")
    start_time = time.time()
    args, rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest)
    if generate_kwargs and args.local_rank <= 0:
        print(f"parsed the following generate kwargs: {generate_kwargs}")
    json_save_dir = Path(args.save_dir + "_tmp")
    Path(json_save_dir).mkdir(exist_ok=True)  # this handles locking.
    intermediate_files = list(json_save_dir.glob("rank_*.json"))
    if intermediate_files:
        raise ValueError(f"Found files at {json_save_dir} please move or remove them.")
    # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs["src_lang"] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs["tgt_lang"] = args.tgt_lang
    Path(args.save_dir).mkdir(exist_ok=True)
    results, num_replicas = eval_data_dir(
        args.data_dir,
        json_save_dir,
        args.model_name,
        type_path=args.type_path,
        bs=args.bs,
        fp16=args.fp16,
        task=args.task,
        local_rank=args.local_rank,
        n_obs=args.n_obs,
        max_source_length=args.max_source_length,
        num_return_sequences=args.num_return_sequences,
        prefix=args.prefix,
        dataset_kwargs=dataset_kwargs,
        **generate_kwargs,
    )
    if args.local_rank <= 0:
        save_dir = Path(args.save_dir)
        save_dir.mkdir(exist_ok=True)
        partial_results = gather_results_from_each_node(num_replicas, json_save_dir, args.sync_timeout)
        preds = combine_partial_results(partial_results)
        if args.num_return_sequences > 1:
            save_path = save_dir.joinpath("pseudolabel_results.json")
            print(f"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/")
            save_json(preds, save_path)
            return
        tgt_file = Path(args.data_dir).joinpath(args.type_path + ".target")
        with open(tgt_file) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds)]

        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = "translation" in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = "bleu" if calc_bleu else "rouge"
        metrics: Dict = score_fn(preds, labels)
        metrics["n_obs"] = len(preds)
        runtime = time.time() - start_time
        metrics["seconds_per_sample"] = round(runtime / metrics["n_obs"], 4)
        metrics["n_gpus"] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(f"{args.type_path}_{metric_name}.json")
        save_json(metrics, metrics_save_path, indent=None)
        print(metrics)
        write_txt_file(preds, save_dir.joinpath(f"{args.type_path}_generations.txt"))
        if args.debug:
            write_txt_file(labels, save_dir.joinpath(f"{args.type_path}.target"))
    else:
        shutil.rmtree(json_save_dir)
def combine_partial_results(partial_results) -> list:
    """Concatenate partial results into one file, then sort it by id."""
    records = []
    for partial_result in partial_results:
        records.extend(partial_result)
    records = sorted(records, key=lambda x: x["id"])
    preds = [x["pred"] for x in records]
    return preds
def gather_results_from_each_node(num_replicas, save_dir, timeout) -> List[Dict[str, List]]:
    # WAIT FOR lots of .json files
    start_wait = time.time()
    logger.info("waiting for all nodes to finish")
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob("rank_*.json"))
        if len(json_files) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json, json_files)
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError("Rank 0 gave up on waiting for other processes")
    # Unreachable
if __name__ == "__main__":
    # Usage for MT:
    run_generate()
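    # Example launch (added for illustration; the exact flags below are assumptions,
    # not from the original file):
    #   python -m torch.distributed.launch --nproc_per_node=8 run_distributed_eval.py \
    #       --model_name Helsinki-NLP/opus-mt-en-ro --data_dir $DATA_DIR --save_dir $SAVE_DIR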
| 311
| 0
|
'''simple docstring'''
import functools
from typing import Any
def word_break(string: str, words: list[str]) -> bool:
    # Validation
    if not isinstance(string, str) or len(string) == 0:
        raise ValueError("the string should be not empty string")

    if not isinstance(words, list) or not all(
        isinstance(item, str) and len(item) > 0 for item in words
    ):
        raise ValueError("the words should be a list of non-empty strings")

    # Build trie
    trie: dict[str, Any] = {}
    word_keeper_key = "WORD_KEEPER"

    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        trie_node[word_keeper_key] = True

    len_string = len(string)

    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int) -> bool:
        if index == len_string:
            return True

        trie_node = trie
        for i in range(index, len_string):
            trie_node = trie_node.get(string[i], None)
            if trie_node is None:
                return False

            if trie_node.get(word_keeper_key, False) and is_breakable(i + 1):
                return True

        return False

    return is_breakable(0)
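# Usage sketch (added for illustration; not in the original file):
#   word_break("applepenapple", ["apple", "pen"])                  # -> True
#   word_break("catsandog", ["cats", "dog", "sand", "and", "cat"])  # -> False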
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 23
|
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name):
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
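# Illustration (added; not in the original file): rename_keys maps fairseq-style
# parameter names onto the transformers layout, e.g.
#   "emb.weight"                          -> "model.decoder.embed_tokens.weight"
#   "transformer.layers.0.linear1.weight" -> "model.decoder.layers.0.fc1.weight"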
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
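# Note (added for clarity): each fused attention weight of shape
# (3 * hidden_size, hidden_size) is split row-wise into equal q/k/v projections,
# each of shape (hidden_size, hidden_size).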
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")

    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint",
        default="small",
        type=str,
        help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
    )
    parser.add_argument(
        "--pytorch_dump_folder",
        required=True,
        default=None,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    parser.add_argument(
        "--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
    )

    args = parser.parse_args()
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
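# Example invocation (added for illustration; the script name and output folder are
# assumptions, not from the original file):
#   python convert_musicgen_transformers.py --checkpoint small --pytorch_dump_folder musicgen-small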
| 311
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}


class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
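# Usage sketch (added for illustration; not in the original file):
#   config = ViTMAEConfig(mask_ratio=0.6)  # any of the arguments above can be overridden
#   assert config.hidden_size == 768       # untouched fields keep their defaults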
| 24
|
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
| 311
| 0
|
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)

    def test_call(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
| 25
|
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
    from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_torch
class ZeroShotObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        examples = [
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "candidate_labels": ["cat", "remote", "couch"],
            }
        ]
        return object_detector, examples

    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector(examples[0], threshold=0.0)

        n = len(outputs)
        self.assertGreater(n, 0)
        self.assertEqual(
            outputs,
            [
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                }
                for i in range(n)
            ],
        )

    @require_tf
    @unittest.skip("Zero Shot Object Detection not implemented in TF")
    def test_small_model_tf(self):
        pass

    @require_torch
    def test_small_model_pt(self):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        outputs = object_detector(
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            candidate_labels=["cat", "remote", "couch"],
            threshold=0.64,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                {"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                {"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                {"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                {"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                {"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                {"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
                {"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
                {"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
            ],
        )

        outputs = object_detector(
            [
                {
                    "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                    "candidate_labels": ["cat", "remote", "couch"],
                }
            ],
            threshold=0.64,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                    {"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                    {"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                    {"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                    {"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                    {"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                    {"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
                    {"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
                    {"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
                ]
            ],
        )

    @require_torch
    @slow
    def test_large_model_pt(self):
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
            ],
        )

        outputs = object_detector(
            [
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
            ],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                    {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                    {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                    {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                    {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
                ],
                [
                    {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                    {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                    {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                    {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                    {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
                ],
            ],
        )

    @require_tf
    @unittest.skip("Zero Shot Object Detection not implemented in TF")
    def test_large_model_tf(self):
        pass

    @require_torch
    @slow
    def test_threshold(self):
        threshold = 0.2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            threshold=threshold,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
            ],
        )

    @require_torch
    @slow
    def test_top_k(self):
        top_k = 2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            top_k=top_k,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
            ],
        )
| 311
| 0
|
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k


def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ["START"]


@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue

        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
    parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
    parser.add_argument(
        "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
    )
    args = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
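# Example invocation (added for illustration; the script name and checkpoint path are
# assumptions, not from the original file):
#   python convert_blenderbot_original_pytorch_checkpoint_to_pytorch.py \
#       --src_path blenderbot-model.bin --save_dir hf_blenderbot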
| 26
|
'''simple docstring'''
def twos_complement(number: int) -> str:
    '''simple docstring'''
    if number > 0:
        raise ValueError("input must be a negative integer" )
    binary_number_length = len(bin(number )[3:] )
    twos_complement_number = bin(abs(number ) - (1 << binary_number_length) )[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number ))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
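# Quick worked examples for the function above (minimal two's-complement
# encodings of negative integers, matching the construction in the code):
#   twos_complement(-1)  -> "0b11"
#   twos_complement(-5)  -> "0b1011"
#   twos_complement(0)   -> "0b0"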
| 311
| 0
|
'''simple docstring'''
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
MODEL_TYPE = 'bart'
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True )
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained('yjernite/retribert-base-uncased' )
        qar_model = AutoModel.from_pretrained('yjernite/retribert-base-uncased' ).to('cuda:0' )
        qar_model = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained('yjernite/bart_eli5' )
        sas_model = AutoModelForSeq2SeqLM.from_pretrained('yjernite/bart_eli5' ).to('cuda:0' )
        save_dict = torch.load('seq2seq_models/eli5_bart_model_blm_2.pth' )
        sas_model.load_state_dict(save_dict['model'] )
        sas_model = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_sas_model(
            model_name='t5-small' , from_file='seq2seq_models/eli5_t5_model_1024_4.pth' , device='cuda:0' )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True )
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path='wiki_snippets' , name='wiki40b_en_100_0' )['train']
        wiki40b_passage_reps = np.memmap(
            'wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat' , dtype='float32' , mode='r' , shape=(wiki40b_passages.num_rows, 128) , )
        wiki40b_index_flat = faiss.IndexFlatIP(128 )
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res , 1 , wiki40b_index_flat )
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps )  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{'host': 'localhost', 'port': '9200'}] )
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True )
def load_train_data():
    eli5 = datasets.load_dataset('eli5' , name='LFQA_reddit' )
    eli5_train = eli5['train_eli5']
    eli5_train_q_reps = np.memmap(
        'eli5_questions_reps.dat' , dtype='float32' , mode='r' , shape=(eli5_train.num_rows, 128) )
    eli5_train_q_index = faiss.IndexFlatIP(128 )
    eli5_train_q_index.add(eli5_train_q_reps )
    return (eli5_train, eli5_train_q_index)
wiki40b_passages, wiki40b_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question , n_results=10 ):
    q_rep = embed_questions_for_retrieval([question] , qar_tokenizer , qar_model )
    scores, nn_ids = eli5_train_q_index.search(q_rep , n_results )
    nn_examples = [eli5_train[int(i )] for i in nn_ids[0]]
    return nn_examples
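# For reference, a minimal, self-contained sketch of the max-inner-product
# search pattern used above; the random vectors are stand-ins for real
# question/passage embeddings (dimensions and sizes are illustrative):
#
#   import numpy as np
#   import faiss
#   dim = 128
#   passages = np.random.rand(1000, dim).astype('float32')
#   index = faiss.IndexFlatIP(dim)   # exact inner-product index
#   index.add(passages)
#   query = np.random.rand(1, dim).astype('float32')
#   scores, ids = index.search(query, 10)   # top-10 passage ids per query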
def make_support(question , source="wiki40b" , method="dense" , n_results=10 ):
    if source == "none":
        support_doc, hit_lst = (' <P> '.join(['' for _ in range(11 )] ).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question , qar_model , qar_tokenizer , wiki40b_passages , wiki40b_gpu_index_flat , n_results )
        else:
            support_doc, hit_lst = query_es_index(
                question , es_client , index_name='english_wiki40b_snippets_100w' , n_results=n_results , )
    support_list = [
        (res['article_title'], res['section_title'].strip(), res['score'], res['passage_text']) for res in hit_lst
    ]
    question_doc = 'question: {} context: {}'.format(question , support_doc )
    return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    } )
def answer_question(question_doc , sas_model , sas_tokenizer , min_len=64 , max_len=256 , sampling=False , n_beams=2 , top_p=0.95 , temp=0.8 ):
    with torch.no_grad():
        answer = qa_sas_generate(
            question_doc , sas_model , sas_tokenizer , num_answers=1 , num_beams=n_beams , min_len=min_len , max_len=max_len , do_sample=sampling , temp=temp , top_p=top_p , top_k=None , max_input_length=1_024 , device='cuda:0' , )[0]
    return (answer, support_list)
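# A hedged stand-in for qa_sas_generate, using only stock transformers APIs;
# the checkpoint name is an illustrative choice, not the one trained for ELI5:
#
#   from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
#   tok = AutoTokenizer.from_pretrained('sshleifer/distilbart-cnn-6-6')
#   mdl = AutoModelForSeq2SeqLM.from_pretrained('sshleifer/distilbart-cnn-6-6')
#   batch = tok('question: ... context: ...', return_tensors='pt', truncation=True)
#   out = mdl.generate(**batch, num_beams=2, min_length=8, max_length=64)
#   print(tok.batch_decode(out, skip_special_tokens=True)[0])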
st.title('Long Form Question Answering with ELI5')
# Start sidebar
header_html = '<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'
header_full = '\n<html>\n  <head>\n    <style>\n      .img-container {\n        padding-left: 90px;\n        padding-right: 90px;\n        padding-top: 50px;\n        padding-bottom: 50px;\n        background-color: #f0f3f9;\n      }\n    </style>\n  </head>\n  <body>\n    <span class="img-container"> <!-- Inline parent element -->\n      %s\n    </span>\n  </body>\n</html>\n' % (
    header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
description = '\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n'
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
    'Answer the question',
    'View the retrieved document only',
    'View the most similar ELI5 question and answer',
    'Show me everything, please!',
]
demo_options = st.sidebar.checkbox('Demo options')
if demo_options:
    action_st = st.sidebar.selectbox(
        '',
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        '',
        ['Show full text of passages', 'Show passage section titles'],
        index=0,
    )
    show_passages = show_type == 'Show full text of passages'
else:
    action = 3
    show_passages = True
retrieval_options = st.sidebar.checkbox('Retrieval options')
if retrieval_options:
    retriever_info = '\n    ### Information retriever options\n\n    The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n    trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n    The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n    '
    st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox('Which Wikipedia format should the model use?', ['wiki40b', 'none'])
    index_type = st.sidebar.selectbox('Which Wikipedia indexer should the model use?', ['dense', 'sparse', 'mixed'])
else:
    wiki_source = 'wiki40b'
    index_type = 'dense'
sampled = 'beam'
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox('Generation options')
if generate_options:
    generate_info = '\n    ### Answer generation options\n\n    The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n    weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n    **beam** search, or **sample** from the decoder\'s output probabilities.\n    '
    st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox('Would you like to use beam search or sample an answer?', ['beam', 'sampled'])
    min_len = st.sidebar.slider(
        'Minimum generation length', min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        'Maximum generation length', min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider('Beam size', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            'Nucleus sampling p', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            'Temperature', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None
# start main text
questions_list = [
'<MY QUESTION>',
'How do people make chocolate?',
'Why do we get a fever when we are sick?',
'How can different animals perceive different colors?',
'What is natural language processing?',
'What\'s the best way to treat a sunburn?',
'What exactly are vitamins ?',
'How does nuclear energy provide electricity?',
'What\'s the difference between viruses and bacteria?',
'Why are flutes classified as woodwinds when most of them are made out of metal ?',
'Why do people like drinking coffee even though it tastes so bad?',
'What happens when wine ages? How does it make the wine taste better?',
'If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?',
'How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?',
'How does New Zealand have so many large bird predators?',
]
question_s = st.selectbox(
    'What would you like to ask? ---- select <MY QUESTION> to enter a new query',
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input('Enter your question here:', '')
else:
    question = question_s
if st.button('Show me!'):
    if action in [0, 1, 3]:
        if index_type == "mixed":
            _, support_list_dense = make_support(question, source=wiki_source, method='dense', n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method='sparse', n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = '<P> ' + ' <P> '.join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
    if action in [0, 3]:
        answer, support_list = answer_question(
            question_doc,
            sas_model,
            sas_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == 'sampled'),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
        st.markdown('### The model generated answer is:')
        st.write(answer)
    if action in [0, 1, 3] and wiki_source != "none":
        st.markdown('--- \n ### The model is drawing information from the following Wikipedia passages:')
        for i, res in enumerate(support_list):
            wiki_url = 'https://en.wikipedia.org/wiki/{}'.format(res[0].replace(' ', '_'))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = '[{}]({})'.format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(' & ')
                sections = ' & '.join(
                    ['[{}]({}#{})'.format(sec.strip(), wiki_url, sec.strip().replace(' ', '_')) for sec in sec_list]
                )
            st.markdown(
                '{0:02d} - **Article**: {1:<18} <br>  _Section_: {2}'.format(i + 1, res[0], sections),
                unsafe_allow_html=True,
            )
            if show_passages:
                st.write(
                    '> <span style="font-family:arial; font-size:10pt;">' + res[-1] + '</span>', unsafe_allow_html=True
                )
    if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
        st.markdown(
            '--- \n ### The most similar question in the ELI5 training set was: \n\n {}'.format(train_exple['title'])
        )
        answers_st = [
            '{}. {}'.format(i + 1, ' \n'.join([line.strip() for line in ans.split('\n') if line.strip() != '']))
            for i, (ans, sc) in enumerate(zip(train_exple['answers']['text'], train_exple['answers']['score']))
            if i == 0 or sc > 2
        ]
        st.markdown('##### Its answers were: \n\n {}'.format('\n'.join(answers_st)))
disclaimer = '\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n'
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 27
|
'''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()
X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]
X_train, X_test, y_train, y_test = train_test_split(X, y)
def euclidean_distance(a , b ):
    '''simple docstring'''
    return np.linalg.norm(np.array(a ) - np.array(b ) )
def classifier(train_data , train_target , classes , point , k=5 ):
    '''simple docstring'''
    data = zip(train_data , train_target )
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0] , point )
        distances.append((distance, data_point[1]) )
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances )[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes ).most_common(1 )[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
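# For comparison, scikit-learn's built-in estimator makes the same kind of
# prediction (a sketch, reusing the split created above):
#
#   from sklearn.neighbors import KNeighborsClassifier
#   knn = KNeighborsClassifier(n_neighbors=5).fit(X_train, y_train)
#   print(classes[knn.predict([[4.4, 3.1, 1.3, 1.4]])[0]])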
| 311
| 0
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
_lowerCamelCase : str = logging.get_logger(__name__)
class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor ):
    """simple docstring"""
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        warnings.warn(
            'The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use ChineseCLIPImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 28
|
'''simple docstring'''
def is_power_of_two(number: int ) -> bool:
    '''simple docstring'''
    if number < 0:
        raise ValueError("number must not be negative" )
    return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
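# The bit trick above works because a power of two has exactly one set bit, so
# subtracting 1 flips it and sets all lower bits; ANDing the two gives zero:
#   is_power_of_two(8)   # True  (0b1000 & 0b0111 == 0)
#   is_power_of_two(6)   # False (0b0110 & 0b0101 == 0b0100)
#   is_power_of_two(0)   # True by this test -- callers may want to exclude 0.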
| 311
| 0
|
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
    '''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=50 , initializer_range=0.02 , use_labels=True , scope=None , ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.get_config()
        return config, input_ids, input_mask, token_labels
    def get_config( self ):
        return BertGenerationConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=False , initializer_range=self.initializer_range , )
    def prepare_config_and_inputs_for_decoder( self ):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model( self , config , input_ids , input_mask , token_labels , **kwargs , ):
        model = BertGenerationEncoder(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder( self , config , input_ids , input_mask , token_labels , encoder_hidden_states , encoder_attention_mask , **kwargs , ):
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , )
        result = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_decoder_model_past_large_inputs( self , config , input_ids , input_mask , token_labels , encoder_hidden_states , encoder_attention_mask , **kwargs , ):
        config.add_cross_attention = True
        config.is_decoder = True
        model = BertGenerationDecoder(config=config ).to(torch_device ).eval()
        # first forward pass
        outputs = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , use_cache=True , )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_mask = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        next_attention_mask = torch.cat([input_mask, next_mask] , dim=-1 )
        output_from_no_past = model(
            next_input_ids , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , output_hidden_states=True , )['hidden_states'][0]
        output_from_past = model(
            next_tokens , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , past_key_values=past_key_values , output_hidden_states=True , )['hidden_states'][0]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1E-3 ) )
    def create_and_check_for_causal_lm( self , config , input_ids , input_mask , token_labels , *args , ):
        model = BertGenerationDecoder(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def prepare_config_and_inputs_for_common( self ):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class BertGenerationEncoderTest(ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': BertGenerationEncoder, 'text-generation': BertGenerationDecoder}
        if is_torch_available()
        else {}
    )
    def setUp( self ):
        self.model_tester = BertGenerationEncoderTester(self )
        self.config_tester = ConfigTester(self , config_class=BertGenerationConfig , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_as_bert( self ):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = 'bert'
        self.model_tester.create_and_check_model(config , input_ids , input_mask , token_labels )
    def test_model_as_decoder( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs )
    def test_decoder_model_past_with_large_inputs( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs )
    def test_model_as_decoder_with_default_input_mask( self ):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config , input_ids , input_mask , token_labels , encoder_hidden_states , encoder_attention_mask , )
    def test_for_causal_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        model = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
        self.assertIsNotNone(model )
@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase ):
    '''simple docstring'''
    @slow
    def test_inference_no_head_absolute_embedding( self ):
        model = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size([1, 8, 1024] )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[0.17_75, 0.00_83, -0.03_21], [1.60_02, 0.12_87, 0.39_12], [2.14_73, 0.57_91, 0.60_66]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase ):
    '''simple docstring'''
    @slow
    def test_inference_no_head_absolute_embedding( self ):
        model = BertGenerationDecoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size([1, 8, 50358] )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.57_88, -2.59_94, -3.70_54], [0.04_38, 4.79_97, 1.87_95], [1.58_62, 6.64_09, 4.46_38]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
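# A minimal sketch of the past-key-values equivalence idea tested above,
# assuming a small randomly initialized decoder (sizes are illustrative):
#
#   import torch
#   from transformers import BertGenerationConfig, BertGenerationDecoder
#   cfg = BertGenerationConfig(vocab_size=99, hidden_size=32, num_hidden_layers=2,
#                              num_attention_heads=4, intermediate_size=37, is_decoder=True)
#   model = BertGenerationDecoder(cfg).eval()
#   ids = torch.randint(0, 99, (1, 8))
#   out = model(ids, use_cache=True)
#   nxt = torch.randint(0, 99, (1, 1))
#   cached = model(nxt, past_key_values=out.past_key_values).logits
#   full = model(torch.cat([ids, nxt], dim=-1)).logits[:, -1:]
#   assert torch.allclose(cached, full, atol=1e-3)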
| 29
|
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler , num_steps=10 ):
    '''simple docstring'''
    lrs = []
    for _ in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
    return lrs
def unwrap_and_save_reload_schedule(scheduler , num_steps=10 ):
    '''simple docstring'''
    lrs = []
    for step in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname , "schedule.bin" )
                torch.save(scheduler.state_dict() , file_name )
                state_dict = torch.load(file_name )
                scheduler.load_state_dict(state_dict )
    return lrs
@require_torch
class OptimizationTest(unittest.TestCase ):
    """simple docstring"""
    def assertListAlmostEqual( self , list1 , list2 , tol ):
        '''simple docstring'''
        self.assertEqual(len(list1 ) , len(list2 ) )
        for a, b in zip(list1 , list2 ):
            self.assertAlmostEqual(a , b , delta=tol )
    def test_adam_w( self ):
        '''simple docstring'''
        w = torch.tensor([0.1, -0.2, -0.1] , requires_grad=True )
        target = torch.tensor([0.4, 0.2, -0.5] )
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 )
        for _ in range(1_0_0 ):
            loss = criterion(w , target )
            loss.backward()
            optimizer.step()
            w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
    def test_adafactor( self ):
        '''simple docstring'''
        w = torch.tensor([0.1, -0.2, -0.1] , requires_grad=True )
        target = torch.tensor([0.4, 0.2, -0.5] )
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , beta1=None , weight_decay=0.0 , relative_step=False , scale_parameter=False , warmup_init=False , )
        for _ in range(1_0_0_0 ):
            loss = criterion(w , target )
            loss.backward()
            optimizer.step()
            w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
@require_torch
class ScheduleInitTest(unittest.TestCase ):
    """simple docstring"""
    m = nn.Linear(50 , 50 ) if is_torch_available() else None
    optimizer = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None
    num_steps = 10
    def assertListAlmostEqual( self , list1 , list2 , tol , msg=None ):
        '''simple docstring'''
        self.assertEqual(len(list1 ) , len(list2 ) )
        for a, b in zip(list1 , list2 ):
            self.assertAlmostEqual(a , b , delta=tol , msg=msg )
    def test_schedulers( self ):
        '''simple docstring'''
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 1_0}
        # schedulers doct format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"num_warmup_steps": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, "num_cycles": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, "power": 2.0, "lr_end": 1e-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"num_warmup_steps": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data
            scheduler = scheduler_func(self.optimizer , **kwargs )
            self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
            lrs_1 = unwrap_schedule(scheduler , self.num_steps )
            self.assertListAlmostEqual(
                lrs_1 , expected_learning_rates , tol=1e-2 , msg=f"failed for {scheduler_func} in normal scheduler" , )
            scheduler = scheduler_func(self.optimizer , **kwargs )
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler ) # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler , self.num_steps )
            self.assertListEqual(lrs_1 , lrs_2 , msg=f"failed for {scheduler_func} in save and reload" )
class LambdaScheduleWrapper:
    """simple docstring"""
    def __init__( self , fn ):
        '''simple docstring'''
        self.fn = fn
    def __call__( self , *args , **kwargs ):
        '''simple docstring'''
        return self.fn(*args , **kwargs )
    @classmethod
    def wrap_scheduler( self , scheduler ):
        '''simple docstring'''
        scheduler.lr_lambdas = list(map(self , scheduler.lr_lambdas ) )
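# A hedged, standalone sketch of driving one of the schedules above (the tiny
# model and step counts are illustrative):
#
#   import torch
#   from transformers import get_linear_schedule_with_warmup
#   model = torch.nn.Linear(4, 4)
#   optimizer = torch.optim.AdamW(model.parameters(), lr=10.0)
#   scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=2, num_training_steps=10)
#   for _ in range(10):
#       optimizer.step()
#       scheduler.step()
#       print(scheduler.get_last_lr()[0])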
| 311
| 0
|
from __future__ import annotations
class Node:
    """simple docstring"""
    def __init__( self , data: int ) -> None:
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None
def display( tree: Node | None ):  # In Order traversal of the tree
    '''simple docstring'''
    if tree:
        display(tree.left )
        print(tree.data )
        display(tree.right )
def depth_of_tree( tree: Node | None ):
    '''simple docstring'''
    return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0
def is_full_binary_tree( tree: Node ):
    '''simple docstring'''
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
    else:
        return not tree.left and not tree.right
def main():  # Main function for testing.
    '''simple docstring'''
    tree = Node(1 )
    tree.left = Node(2 )
    tree.right = Node(3 )
    tree.left.left = Node(4 )
    tree.left.right = Node(5 )
    tree.left.right.left = Node(6 )
    tree.right.left = Node(7 )
    tree.right.left.left = Node(8 )
    tree.right.left.left.right = Node(9 )
    print(is_full_binary_tree(tree ) )
    print(depth_of_tree(tree ) )
    print('''Tree is: ''' )
    display(tree )
if __name__ == "__main__":
main()
| 30
|
'''simple docstring'''
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "T5Config"
def shift_tokens_right( input_ids , pad_token_id , decoder_start_token_id ):
    '''simple docstring'''
    shifted_input_ids = jnp.zeros_like(input_ids )
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id )
    shifted_input_ids = jnp.where(shifted_input_ids == -100 , pad_token_id , shifted_input_ids )
    return shifted_input_ids
class FlaxMT5Model(FlaxTaModel ):
    """simple docstring"""
    model_type = "mt5"
    config_class = MTaConfig
class FlaxMT5EncoderModel(FlaxTaEncoderModel ):
    """simple docstring"""
    model_type = "mt5"
    config_class = MTaConfig
class FlaxMT5ForConditionalGeneration(FlaxTaForConditionalGeneration ):
    """simple docstring"""
    model_type = "mt5"
    config_class = MTaConfig
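# A quick illustration of the shift above (hypothetical values: pad_token_id=0,
# decoder_start_token_id=1); -100 label positions become padding:
#
#   import jax.numpy as jnp
#   ids = jnp.array([[5, 6, 7, -100]])
#   shift_tokens_right(ids, pad_token_id=0, decoder_start_token_id=1)
#   # -> [[1, 5, 6, 7]]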
| 311
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"""configuration_beit""": ["""BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BeitConfig""", """BeitOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""feature_extraction_beit"""] = ["""BeitFeatureExtractor"""]
    _import_structure["""image_processing_beit"""] = ["""BeitImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_beit"""] = [
"""BEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BeitForImageClassification""",
"""BeitForMaskedImageModeling""",
"""BeitForSemanticSegmentation""",
"""BeitModel""",
"""BeitPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_beit"""] = [
"""FlaxBeitForImageClassification""",
"""FlaxBeitForMaskedImageModeling""",
"""FlaxBeitModel""",
"""FlaxBeitPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
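# The lazy-module pattern above defers importing heavy submodules until an
# attribute is first accessed. A minimal standalone sketch of the idea (this
# is a simplified stand-in, not transformers' actual _LazyModule):
#
#   import importlib, types
#   class LazyModule(types.ModuleType):
#       def __init__(self, name, import_structure):
#           super().__init__(name)
#           self._attr_to_module = {a: m for m, attrs in import_structure.items() for a in attrs}
#       def __getattr__(self, attr):
#           module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
#           return getattr(module, attr)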
| 31
|
'''simple docstring'''
from jiwer import compute_measures
import datasets
_CITATION = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
_DESCRIPTION = "\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n"
_KWARGS_DESCRIPTION = "\nCompute WER score of transcribed segments against references.\n\nArgs:\n    references: List of references for each speech input.\n    predictions: List of transcriptions to score.\n    concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n    (float): the word error rate\n\nExamples:\n\n    >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n    >>> references = [\"this is the reference\", \"there is another one\"]\n    >>> wer = datasets.load_metric(\"wer\")\n    >>> wer_score = wer.compute(predictions=predictions, references=references)\n    >>> print(wer_score)\n    0.5\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase__ ( datasets.Metric ):
"""simple docstring"""
    def _info( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[
"https://en.wikipedia.org/wiki/Word_error_rate",
] , )
    def _compute( self , predictions=None , references=None , concatenate_texts=False ):
        '''simple docstring'''
        if concatenate_texts:
            return compute_measures(references , predictions )["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions , references ):
                measures = compute_measures(reference , prediction )
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
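# A quick sanity check of the iterative formula above, assuming jiwer is
# installed; the strings mirror the docstring example (one substitution in a
# four-word reference gives WER = 1/4):
#
#   from jiwer import compute_measures
#   m = compute_measures("this is the reference", "this is the prediction")
#   wer = (m["substitutions"] + m["deletions"] + m["insertions"]) / (
#       m["substitutions"] + m["deletions"] + m["hits"])
#   print(wer)  # 0.25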
| 311
| 0
|
from ... import PretrainedConfig
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'sijunhe/nezha-cn-base': 'https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json',
}
class NezhaConfig(PretrainedConfig ):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = '''nezha'''
    def __init__( self , vocab_size=21128 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , max_relative_position=64 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , classifier_dropout=0.1 , pad_token_id=0 , bos_token_id=2 , eos_token_id=3 , use_cache=True , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
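# Minimal usage sketch (all values are the defaults above):
#
#   config = NezhaConfig()
#   print(config.model_type, config.hidden_size)  # nezha 768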
| 32
|
'''simple docstring'''
from functools import lru_cache
def unique_prime_factors( n ):
    '''simple docstring'''
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i )
    if n > 1:
        factors.add(n )
    return factors
@lru_cache
def upf_len( num ):
    '''simple docstring'''
    return len(unique_prime_factors(num ) )
def equality( iterable ):
    '''simple docstring'''
    return len(set(iterable ) ) in (0, 1)
def run( n ):
    '''simple docstring'''
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n )]
        # Run elements through out unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x ) for x in group]
        checker.append(n )
        # If all numbers in the list are equal, return the group variable.
        if equality(checker ):
            return group
        # Increment our base variable by 1
        base += 1
def solution( n = 4 ):
    '''simple docstring'''
    results = run(n )
    return results[0] if len(results ) else None
if __name__ == "__main__":
print(solution())
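# Known checkpoints for the search above (Project Euler 47): the first run of
# three consecutive integers with three distinct prime factors starts at 644,
# and solution() with the default n=4 yields the published answer 134043.
#
#   assert solution(3) == 644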
| 311
| 0
|
"""simple docstring"""
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
__A : Any = logging.get_logger(__name__)
class RagTokenizer:
    def __init__( self , question_encoder , generator ) -> None:
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder
    def save_pretrained( self , save_directory ) -> None:
        if os.path.isfile(save_directory ):
            raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''' )
        os.makedirs(save_directory , exist_ok=True )
        question_encoder_path = os.path.join(save_directory , '''question_encoder_tokenizer''' )
        generator_path = os.path.join(save_directory , '''generator_tokenizer''' )
        self.question_encoder.save_pretrained(question_encoder_path )
        self.generator.save_pretrained(generator_path )
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer
        config = kwargs.pop('''config''' , None )
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path )
        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path , config=config.question_encoder , subfolder='''question_encoder_tokenizer''' )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path , config=config.generator , subfolder='''generator_tokenizer''' )
        return cls(question_encoder=question_encoder , generator=generator )
    def __call__( self , *args , **kwargs ):
        return self.current_tokenizer(*args , **kwargs )
    def batch_decode( self , *args , **kwargs ):
        return self.generator.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.generator.decode(*args , **kwargs )
    def _switch_to_input_mode( self ) -> None:
        self.current_tokenizer = self.question_encoder
    def _switch_to_target_mode( self ) -> None:
        self.current_tokenizer = self.generator
    def prepare_seq2seq_batch( self , src_texts , tgt_texts: Optional[List[str]] = None , max_length: Optional[int] = None , max_target_length: Optional[int] = None , padding: str = "longest" , return_tensors: str = None , truncation: bool = True , **kwargs , ) -> BatchEncoding:
        warnings.warn(
            '''`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '''
            '''regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '''
            '''context manager to prepare your targets. See the documentation of your specific tokenizer for more '''
            '''details''' , FutureWarning , )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts , add_special_tokens=True , return_tensors=return_tensors , max_length=max_length , padding=padding , truncation=truncation , **kwargs , )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts , add_special_tokens=True , return_tensors=return_tensors , padding=padding , max_length=max_target_length , truncation=truncation , **kwargs , )
        model_inputs['''labels'''] = labels['''input_ids''']
        return model_inputs
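# Hedged usage sketch: RagTokenizer bundles a question-encoder tokenizer and a
# generator tokenizer loaded from subfolders of one checkpoint repo:
#
#   from transformers import RagTokenizer
#   tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-base")
#   inputs = tokenizer("who holds the record in 100m freestyle", return_tensors="pt")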
| 33
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_encodec": [
"ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EncodecConfig",
],
"feature_extraction_encodec": ["EncodecFeatureExtractor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_encodec"] = ["EncodecFeatureExtractor"] if False else None  # placeholder removed below
    _import_structure["modeling_encodec"] = [
"ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
"EncodecModel",
"EncodecPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 311
| 0
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device )
def test_gather(state):
    tensor = create_tensor(state )
    gathered_tensor = gather(tensor )
    assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) )
def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj )
    assert len(gathered_obj ) == state.num_processes, F"{gathered_obj}, {len(gathered_obj )} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes ) ), F"{gathered_obj} != {list(range(state.num_processes ) )}"
def test_broadcast(state):
    tensor = create_tensor(state )
    broadcasted_tensor = broadcast(tensor )
    assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
    assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) )
def test_pad_across_processes(state):
    # We need to pad the tensor with one more element if we are the main process
    # to ensure that we can pad
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1 ).to(state.device )
    else:
        tensor = torch.arange(state.num_processes ).to(state.device )
    padded_tensor = pad_across_processes(tensor )
    assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0]
def test_reduce_sum(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state )
    reduced_tensor = reduce(tensor , '''sum''' )
    truth_tensor = torch.tensor([4.0, 6] ).to(state.device )
    assert torch.allclose(reduced_tensor , truth_tensor ), F"{reduced_tensor} != {truth_tensor}"
def test_reduce_mean(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state )
    reduced_tensor = reduce(tensor , '''mean''' )
    truth_tensor = torch.tensor([2.0, 3] ).to(state.device )
    assert torch.allclose(reduced_tensor , truth_tensor ), F"{reduced_tensor} != {truth_tensor}"
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
def main():
    state = PartialState()
    state.print(F"State: {state}" )
    state.print('''testing gather''' )
    test_gather(state )
    state.print('''testing gather_object''' )
    test_gather_object(state )
    state.print('''testing broadcast''' )
    test_broadcast(state )
    state.print('''testing pad_across_processes''' )
    test_pad_across_processes(state )
    state.print('''testing reduce_sum''' )
    test_reduce_sum(state )
    state.print('''testing reduce_mean''' )
    test_reduce_mean(state )
if __name__ == "__main__":
main()
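# These checks are meant to be launched across processes, e.g. (script name
# illustrative):
#   accelerate launch --num_processes 2 test_ops.py
# On a single process they also run standalone, since gather/broadcast/reduce
# degenerate to identity-like operations, which makes main() a smoke test.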
| 34
|
'''simple docstring'''
# Lint as: python3
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(R"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(R"([a-z\d])([A-Z])")
_single_underscore_re = re.compile(R"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(R"(_{2,})")
_split_re = R"^\w+(\.\w+)*$"
INVALID_WINDOWS_CHARACTERS_IN_PATH = R"<>:/\|?*"
def camelcase_to_snakecase( name ):
    '''simple docstring'''
    name = _uppercase_uppercase_re.sub(R"\1_\2" , name )
    name = _lowercase_uppercase_re.sub(R"\1_\2" , name )
    return name.lower()
def snakecase_to_camelcase( name ):
    '''simple docstring'''
    name = _single_underscore_re.split(name )
    name = [_multiple_underscores_re.split(n ) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name ) if n != "" )
def filename_prefix_for_name( name ):
    '''simple docstring'''
    if os.path.basename(name ) != name:
        raise ValueError(F"Should be a dataset name, not a path: {name}" )
    return camelcase_to_snakecase(name )
def filename_prefix_for_split( name , split ):
    '''simple docstring'''
    if os.path.basename(name ) != name:
        raise ValueError(F"Should be a dataset name, not a path: {name}" )
    if not re.match(_split_re , split ):
        raise ValueError(F"Split name should match '{_split_re}'' but got '{split}'." )
    return F"{filename_prefix_for_name(name )}-{split}"
def filepattern_for_dataset_split( dataset_name , split , data_dir , filetype_suffix=None ):
    '''simple docstring'''
    prefix = filename_prefix_for_split(dataset_name , split )
    if filetype_suffix:
        prefix += F".{filetype_suffix}"
    filepath = os.path.join(data_dir , prefix )
    return F"{filepath}*"
def filenames_for_dataset_split( path , dataset_name , split , filetype_suffix=None , shard_lengths=None ):
    '''simple docstring'''
    prefix = filename_prefix_for_split(dataset_name , split )
    prefix = os.path.join(path , prefix )
    if shard_lengths:
        num_shards = len(shard_lengths )
        filenames = [F"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards )]
        if filetype_suffix:
            filenames = [filename + F".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += F".{filetype_suffix}"
        return [filename]
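# Quick illustration of the helpers above:
#
#   camelcase_to_snakecase("HelloWorld")                # -> "hello_world"
#   filename_prefix_for_split("hello_world", "train")   # -> "hello_world-train"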
| 311
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_plbart"] = [
"PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
"PLBartForCausalLM",
"PLBartForConditionalGeneration",
"PLBartForSequenceClassification",
"PLBartModel",
"PLBartPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 35
|
'''simple docstring'''
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
B"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
a : str = None
a : Optional[Any] = B"H\003"
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
a : str = 45
a : Any = 15_81
a : List[Any] = 15_17
a : Union[str, Any] = 15_70
a : Optional[Any] = 15_84
a : List[str] = 17_93
a : Optional[Any] = 17_95
a : Tuple = 19_16
a : Optional[Any] = 18_64
a : int = 19_05
a : Optional[Any] = 19_19
a : Union[str, Any] = 24_29
a : List[Any] = 22_08
a : Dict = 24_18
a : Optional[int] = 23_23
a : str = 24_07
# @@protoc_insertion_point(module_scope)
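# Usage sketch (assumption: the builder calls above generate a `ModelProto`
# message class into this module, as in the upstream sentencepiece_model.proto):
#   m = ModelProto()
#   with open("spiece.model", "rb") as f:   # path is a placeholder
#       m.ParseFromString(f.read())
#   print(m.trainer_spec.vocab_size, len(m.pieces))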
| 311
| 0
|
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    def process(self, sample: float) -> float:
        '''simple docstring'''
        return 0.0
def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    '''simple docstring'''
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest
def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    '''simple docstring'''
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)
    # Frequencies on log scale from 24 to Nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")
    plt.plot(fft_db)
    plt.show()
def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    '''simple docstring'''
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_phase = np.angle(np.fft.fft(outputs))
    # Frequencies on log scale from 24 to Nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(fft_phase, -2 * pi))
    plt.show()
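# Illustrative usage (not part of the original module): an all-pass "filter"
# that satisfies the process() protocol above gives a flat 0 dB magnitude
# response and zero phase shift.
class IdentityFilter:
    def process(self, sample: float) -> float:
        return sample  # impulse in -> impulse out


# show_frequency_response(IdentityFilter(), 48000)
# show_phase_response(IdentityFilter(), 48000)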
| 36
|
'''simple docstring'''
import argparse
import copy
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : List[str] = {}
with open(__magic_name__ ) as f:
for line in f:
if line.split()[0] not in dict_of_neighbours:
UpperCAmelCase : List[Any] = []
_list.append([line.split()[1], line.split()[2]] )
UpperCAmelCase : Tuple = _list
else:
dict_of_neighbours[line.split()[0]].append(
[line.split()[1], line.split()[2]] )
if line.split()[1] not in dict_of_neighbours:
UpperCAmelCase : Any = []
_list.append([line.split()[0], line.split()[2]] )
UpperCAmelCase : int = _list
else:
dict_of_neighbours[line.split()[1]].append(
[line.split()[0], line.split()[2]] )
return dict_of_neighbours
def lowercase ( __magic_name__ , __magic_name__ ):
'''simple docstring'''
with open(__magic_name__ ) as f:
UpperCAmelCase : List[str] = f.read(1 )
UpperCAmelCase : List[Any] = start_node
UpperCAmelCase : Union[str, Any] = []
UpperCAmelCase : Any = start_node
UpperCAmelCase : Optional[Any] = 0
while visiting not in first_solution:
UpperCAmelCase : Optional[Any] = 1_0000
for k in dict_of_neighbours[visiting]:
if int(k[1] ) < int(__magic_name__ ) and k[0] not in first_solution:
UpperCAmelCase : Tuple = k[1]
UpperCAmelCase : Dict = k[0]
first_solution.append(__magic_name__ )
UpperCAmelCase : int = distance_of_first_solution + int(__magic_name__ )
UpperCAmelCase : str = best_node
first_solution.append(__magic_name__ )
UpperCAmelCase : int = 0
for k in dict_of_neighbours[first_solution[-2]]:
if k[0] == start_node:
break
position += 1
UpperCAmelCase : str = (
distance_of_first_solution
+ int(dict_of_neighbours[first_solution[-2]][position][1] )
- 1_0000
)
return first_solution, distance_of_first_solution
def lowercase ( __magic_name__ , __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = []
for n in solution[1:-1]:
UpperCAmelCase : Any = solution.index(__magic_name__ )
for kn in solution[1:-1]:
UpperCAmelCase : Dict = solution.index(__magic_name__ )
if n == kn:
continue
UpperCAmelCase : Tuple = copy.deepcopy(__magic_name__ )
UpperCAmelCase : Optional[int] = kn
UpperCAmelCase : List[str] = n
UpperCAmelCase : str = 0
for k in _tmp[:-1]:
UpperCAmelCase : List[Any] = _tmp[_tmp.index(__magic_name__ ) + 1]
for i in dict_of_neighbours[k]:
if i[0] == next_node:
UpperCAmelCase : List[Any] = distance + int(i[1] )
_tmp.append(__magic_name__ )
if _tmp not in neighborhood_of_solution:
neighborhood_of_solution.append(_tmp )
UpperCAmelCase : List[str] = len(neighborhood_of_solution[0] ) - 1
    neighborhood_of_solution.sort(key=lambda x : x[index_of_last_item_in_the_list] )
return neighborhood_of_solution
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : List[Any] = 1
UpperCAmelCase : List[str] = first_solution
UpperCAmelCase : str = []
UpperCAmelCase : Union[str, Any] = distance_of_first_solution
UpperCAmelCase : Union[str, Any] = solution
while count <= iters:
UpperCAmelCase : int = find_neighborhood(__magic_name__ , __magic_name__ )
UpperCAmelCase : Any = 0
UpperCAmelCase : List[str] = neighborhood[index_of_best_solution]
UpperCAmelCase : Dict = len(__magic_name__ ) - 1
UpperCAmelCase : Dict = False
while not found:
UpperCAmelCase : List[Any] = 0
while i < len(__magic_name__ ):
if best_solution[i] != solution[i]:
UpperCAmelCase : int = best_solution[i]
UpperCAmelCase : Optional[int] = solution[i]
break
UpperCAmelCase : List[str] = i + 1
if [first_exchange_node, second_exchange_node] not in tabu_list and [
second_exchange_node,
first_exchange_node,
] not in tabu_list:
tabu_list.append([first_exchange_node, second_exchange_node] )
UpperCAmelCase : List[str] = True
UpperCAmelCase : List[Any] = best_solution[:-1]
UpperCAmelCase : str = neighborhood[index_of_best_solution][best_cost_index]
if cost < best_cost:
UpperCAmelCase : Union[str, Any] = cost
UpperCAmelCase : Tuple = solution
else:
UpperCAmelCase : Optional[Any] = index_of_best_solution + 1
UpperCAmelCase : str = neighborhood[index_of_best_solution]
if len(__magic_name__ ) >= size:
tabu_list.pop(0 )
UpperCAmelCase : int = count + 1
return best_solution_ever, best_cost
def lowercase ( __magic_name__=None ):
'''simple docstring'''
UpperCAmelCase : Dict = generate_neighbours(args.File )
UpperCAmelCase , UpperCAmelCase : Any = generate_first_solution(
args.File , __magic_name__ )
UpperCAmelCase , UpperCAmelCase : Any = tabu_search(
__magic_name__ , __magic_name__ , __magic_name__ , args.Iterations , args.Size , )
print(F"Best solution: {best_sol}, with total distance: {best_cost}." )
if __name__ == "__main__":
a : Union[str, Any] = argparse.ArgumentParser(description="Tabu Search")
parser.add_argument(
"-f",
"--File",
type=str,
help="Path to the file containing the data",
required=True,
)
parser.add_argument(
"-i",
"--Iterations",
type=int,
help="How many iterations the algorithm should perform",
required=True,
)
parser.add_argument(
"-s", "--Size", type=int, help="Size of the tabu list", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
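# Input format sketch (an assumption inferred from how generate_neighbours
# splits each line above: "<node> <node> <distance>" per undirected edge):
#
#   a b 20
#   a c 18
#   b c 10
#
# Example invocation (script name is a placeholder):
#   python tabu_search.py -f tsp_data.txt -i 100 -s 5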
| 311
| 0
|
'''simple docstring'''
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCAmelCase_( ProcessorMixin ):
'''simple docstring'''
__lowercase : List[Any] = ['''image_processor''', '''tokenizer''']
__lowercase : List[Any] = '''BridgeTowerImageProcessor'''
__lowercase : Dict = ('''RobertaTokenizer''', '''RobertaTokenizerFast''')
def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> List[Any]:
super().__init__(__UpperCAmelCase ,__UpperCAmelCase )
def __call__( self ,__UpperCAmelCase ,__UpperCAmelCase = None ,__UpperCAmelCase = True ,__UpperCAmelCase = False ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = 0 ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = False ,__UpperCAmelCase = False ,__UpperCAmelCase = False ,__UpperCAmelCase = False ,__UpperCAmelCase = True ,__UpperCAmelCase = None ,**__UpperCAmelCase ,) -> BatchEncoding:
lowerCAmelCase__ : int = self.tokenizer(
text=__UpperCAmelCase ,add_special_tokens=__UpperCAmelCase ,padding=__UpperCAmelCase ,truncation=__UpperCAmelCase ,max_length=__UpperCAmelCase ,stride=__UpperCAmelCase ,pad_to_multiple_of=__UpperCAmelCase ,return_token_type_ids=__UpperCAmelCase ,return_attention_mask=__UpperCAmelCase ,return_overflowing_tokens=__UpperCAmelCase ,return_special_tokens_mask=__UpperCAmelCase ,return_offsets_mapping=__UpperCAmelCase ,return_length=__UpperCAmelCase ,verbose=__UpperCAmelCase ,return_tensors=__UpperCAmelCase ,**__UpperCAmelCase ,)
# add pixel_values + pixel_mask
lowerCAmelCase__ : Optional[int] = self.image_processor(
__UpperCAmelCase ,return_tensors=__UpperCAmelCase ,do_normalize=__UpperCAmelCase ,do_center_crop=__UpperCAmelCase ,**__UpperCAmelCase )
encoding.update(__UpperCAmelCase )
return encoding
def UpperCAmelCase_ ( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> str:
return self.tokenizer.batch_decode(*__UpperCAmelCase ,**__UpperCAmelCase )
def UpperCAmelCase_ ( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[Any]:
return self.tokenizer.decode(*__UpperCAmelCase ,**__UpperCAmelCase )
@property
def UpperCAmelCase_ ( self ) -> Tuple:
lowerCAmelCase__ : List[str] = self.tokenizer.model_input_names
lowerCAmelCase__ : Tuple = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
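# Usage sketch (illustrative; `BridgeTowerProcessor` is the upstream name of
# this class, and the checkpoint/image are placeholders):
#   from transformers import BridgeTowerProcessor
#   processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
#   inputs = processor(images=image, text="a photo of a cat", return_tensors="pt")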
| 37
|
'''simple docstring'''
from collections.abc import Generator
from math import sin
def lowercase ( __magic_name__ ):
'''simple docstring'''
if len(__magic_name__ ) != 32:
raise ValueError("Input must be of length 32" )
UpperCAmelCase : Union[str, Any] = b""
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def lowercase ( __magic_name__ ):
'''simple docstring'''
if i < 0:
raise ValueError("Input must be non-negative" )
UpperCAmelCase : Dict = format(__magic_name__ , "08x" )[-8:]
UpperCAmelCase : List[str] = b""
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8" )
return little_endian_hex
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : int = b""
for char in message:
bit_string += format(__magic_name__ , "08b" ).encode("utf-8" )
UpperCAmelCase : List[Any] = format(len(__magic_name__ ) , "064b" ).encode("utf-8" )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(__magic_name__ ) % 512 != 448:
bit_string += b"0"
bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
return bit_string
def lowercase ( __magic_name__ ):
'''simple docstring'''
if len(__magic_name__ ) % 512 != 0:
raise ValueError("Input must have length that's a multiple of 512" )
for pos in range(0 , len(__magic_name__ ) , 512 ):
UpperCAmelCase : Union[str, Any] = bit_string[pos : pos + 512]
UpperCAmelCase : Tuple = []
for i in range(0 , 512 , 32 ):
block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
yield block_words
def lowercase ( __magic_name__ ):
'''simple docstring'''
if i < 0:
raise ValueError("Input must be non-negative" )
UpperCAmelCase : Any = format(__magic_name__ , "032b" )
UpperCAmelCase : int = ""
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(__magic_name__ , 2 )
def lowercase ( __magic_name__ , __magic_name__ ):
'''simple docstring'''
return (a + b) % 2**32
def lowercase ( __magic_name__ , __magic_name__ ):
'''simple docstring'''
if i < 0:
raise ValueError("Input must be non-negative" )
if shift < 0:
raise ValueError("Shift must be non-negative" )
return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Dict = preprocess(__magic_name__ )
UpperCAmelCase : List[Any] = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
# Starting states
UpperCAmelCase : List[str] = 0X67452301
UpperCAmelCase : Tuple = 0XEFCDAB89
UpperCAmelCase : List[Any] = 0X98BADCFE
UpperCAmelCase : List[str] = 0X10325476
UpperCAmelCase : Dict = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(__magic_name__ ):
UpperCAmelCase : Optional[Any] = aa
UpperCAmelCase : List[Any] = ba
UpperCAmelCase : Optional[Any] = ca
UpperCAmelCase : Any = da
# Hash current chunk
for i in range(64 ):
if i <= 15:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
UpperCAmelCase : Tuple = d ^ (b & (c ^ d))
UpperCAmelCase : List[str] = i
elif i <= 31:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
UpperCAmelCase : int = c ^ (d & (b ^ c))
UpperCAmelCase : Tuple = (5 * i + 1) % 16
elif i <= 47:
UpperCAmelCase : Any = b ^ c ^ d
UpperCAmelCase : Union[str, Any] = (3 * i + 5) % 16
else:
UpperCAmelCase : Dict = c ^ (b | not_aa(__magic_name__ ))
UpperCAmelCase : Dict = (7 * i) % 16
UpperCAmelCase : List[str] = (f + a + added_consts[i] + block_words[g]) % 2**32
UpperCAmelCase : List[Any] = d
UpperCAmelCase : Any = c
UpperCAmelCase : Dict = b
UpperCAmelCase : Union[str, Any] = sum_aa(__magic_name__ , left_rotate_aa(__magic_name__ , shift_amounts[i] ) )
# Add hashed chunk to running total
UpperCAmelCase : List[str] = sum_aa(__magic_name__ , __magic_name__ )
UpperCAmelCase : Any = sum_aa(__magic_name__ , __magic_name__ )
UpperCAmelCase : List[Any] = sum_aa(__magic_name__ , __magic_name__ )
UpperCAmelCase : Optional[int] = sum_aa(__magic_name__ , __magic_name__ )
UpperCAmelCase : List[str] = reformat_hex(__magic_name__ ) + reformat_hex(__magic_name__ ) + reformat_hex(__magic_name__ ) + reformat_hex(__magic_name__ )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
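# Sanity-check sketch (assumes the helpers above are bound to the names used
# at their call sites, e.g. `preprocess` and `sum_aa`, and the final routine
# to something like `md5_me`); the digest should match hashlib's reference:
#   import hashlib
#   assert md5_me(b"hello") == hashlib.md5(b"hello").hexdigest().encode()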
| 311
| 0
|
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
UpperCAmelCase_ : Dict = 0
UpperCAmelCase_ : List[str] = [
[0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0s are free cells whereas 1s are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
UpperCAmelCase_ : int = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
UpperCAmelCase_ : str = tuple[int, int]
class _SCREAMING_SNAKE_CASE :
def __init__( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : Node | None , ):
UpperCamelCase :Any = pos_x
UpperCamelCase :List[str] = pos_y
UpperCamelCase :Any = (pos_y, pos_x)
UpperCamelCase :str = goal_x
UpperCamelCase :Tuple = goal_y
UpperCamelCase :Tuple = g_cost
UpperCamelCase :Tuple = parent
UpperCamelCase :List[Any] = self.calculate_heuristic()
UpperCamelCase :List[str] = self.g_cost + self.h_cost
def _A ( self : List[Any] ):
UpperCamelCase :List[str] = self.pos_x - self.goal_x
UpperCamelCase :Optional[int] = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(__lowerCamelCase ) + abs(__lowerCamelCase )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self : Optional[Any] , __lowerCamelCase : Node ):
return self.f_cost < other.f_cost
class _SCREAMING_SNAKE_CASE :
def __init__( self : List[Any] , __lowerCamelCase : TPosition , __lowerCamelCase : TPosition ):
UpperCamelCase :str = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , __lowerCamelCase )
UpperCamelCase :int = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99_999 , __lowerCamelCase )
UpperCamelCase :Optional[Any] = [self.start]
UpperCamelCase :list[Node] = []
UpperCamelCase :Optional[int] = False
def _A ( self : Optional[Any] ):
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
UpperCamelCase :Dict = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(__lowerCamelCase )
self.closed_nodes.append(__lowerCamelCase )
UpperCamelCase :List[Any] = self.get_successors(__lowerCamelCase )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(__lowerCamelCase )
else:
# retrieve the best current path
UpperCamelCase :List[Any] = self.open_nodes.pop(self.open_nodes.index(__lowerCamelCase ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(__lowerCamelCase )
else:
self.open_nodes.append(__lowerCamelCase )
return [self.start.pos]
def _A ( self : Tuple , __lowerCamelCase : Node ):
UpperCamelCase :List[Any] = []
for action in delta:
UpperCamelCase :Union[str, Any] = parent.pos_x + action[1]
UpperCamelCase :List[Any] = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(grid ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
__lowerCamelCase , __lowerCamelCase , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , __lowerCamelCase , ) )
return successors
def _A ( self : Any , __lowerCamelCase : Node | None ):
UpperCamelCase :Union[str, Any] = node
UpperCamelCase :List[str] = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
UpperCamelCase :Optional[Any] = current_node.parent
path.reverse()
return path
class _SCREAMING_SNAKE_CASE :
def __init__( self : Union[str, Any] , __lowerCamelCase : TPosition , __lowerCamelCase : TPosition ):
UpperCamelCase :Any = AStar(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :List[str] = AStar(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :Dict = False
def _A ( self : Tuple ):
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
UpperCamelCase :Dict = self.fwd_astar.open_nodes.pop(0 )
UpperCamelCase :Union[str, Any] = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
__lowerCamelCase , __lowerCamelCase )
self.fwd_astar.closed_nodes.append(__lowerCamelCase )
self.bwd_astar.closed_nodes.append(__lowerCamelCase )
UpperCamelCase :int = current_bwd_node
UpperCamelCase :str = current_fwd_node
UpperCamelCase :List[Any] = {
self.fwd_astar: self.fwd_astar.get_successors(__lowerCamelCase ),
self.bwd_astar: self.bwd_astar.get_successors(__lowerCamelCase ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(__lowerCamelCase )
else:
# retrieve the best current path
UpperCamelCase :Optional[int] = astar.open_nodes.pop(
astar.open_nodes.index(__lowerCamelCase ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(__lowerCamelCase )
else:
astar.open_nodes.append(__lowerCamelCase )
return [self.fwd_astar.start.pos]
def _A ( self : List[Any] , __lowerCamelCase : Node , __lowerCamelCase : Node ):
UpperCamelCase :List[str] = self.fwd_astar.retrace_path(__lowerCamelCase )
UpperCamelCase :Tuple = self.bwd_astar.retrace_path(__lowerCamelCase )
bwd_path.pop()
bwd_path.reverse()
UpperCamelCase :List[Any] = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
UpperCAmelCase_ : Optional[int] = (0, 0)
UpperCAmelCase_ : str = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
UpperCAmelCase_ : Tuple = time.time()
UpperCAmelCase_ : Any = AStar(init, goal)
UpperCAmelCase_ : str = a_star.search()
UpperCAmelCase_ : Optional[int] = time.time() - start_time
print(F'''AStar execution time = {end_time:f} seconds''')
    UpperCAmelCase_ : Union[str, Any] = time.time()
    UpperCAmelCase_ : List[str] = BidirectionalAStar(init, goal)
    UpperCAmelCase_ : Any = bd_astar.search()  # run the search before timing it; names assumed to mirror the a_star block above
    UpperCAmelCase_ : Union[str, Any] = time.time() - bd_start_time
print(F'''BidirectionalAStar execution time = {bd_end_time:f} seconds''')
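# Heuristic sketch: calculate_heuristic above returns Manhattan distance when
# HEURISTIC == 1 and Euclidean distance otherwise. For start (0, 0) and goal
# (6, 6):
#   manhattan = abs(6 - 0) + abs(6 - 0)   # 12
#   euclidean = sqrt(6**2 + 6**2)         # ~8.49
# Both are admissible on a 4-connected grid with unit step cost, so A* remains
# optimal with either choice.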
| 38
|
'''simple docstring'''
a : List[str] = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
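# Minimal usage sketch for the API re-exported above (the standard pattern
# from the Accelerate docs; model/optimizer/dataloader are placeholders):
#   from accelerate import Accelerator
#   accelerator = Accelerator()
#   model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
#   for batch in dataloader:
#       loss = model(**batch).loss
#       accelerator.backward(loss)
#       optimizer.step()
#       optimizer.zero_grad()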
| 311
| 0
|
from ..utils import DummyObject, requires_backends
class __lowerCamelCase ( metaclass=DummyObject):
"""simple docstring"""
UpperCamelCase__ = ["keras_nlp"]
def __init__( self , *UpperCAmelCase , **UpperCAmelCase ):
"""simple docstring"""
requires_backends(self , ['keras_nlp'] )
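# Behavior sketch (assumed; mirrors the dummy-object pattern used elsewhere in
# transformers): the DummyObject metaclass keeps this class importable even
# when keras_nlp is absent, and requires_backends() raises an ImportError
# naming the missing dependency only when the class is actually used, e.g.
#   obj = SomeKerasNlpClass()   # hypothetical subclass -> ImportError if keras_nlp is missing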
| 39
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
a : Dict = logging.get_logger(__name__)
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : List[str] = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
UpperCAmelCase : Tuple = 192
UpperCAmelCase : str = 768
UpperCAmelCase : List[Any] = 12
UpperCAmelCase : List[Any] = 3
UpperCAmelCase : List[Any] = [800, 1333]
UpperCAmelCase : List[str] = False
elif yolos_name == "yolos_s_dWr":
UpperCAmelCase : Union[str, Any] = 330
UpperCAmelCase : Union[str, Any] = 14
UpperCAmelCase : Any = 6
UpperCAmelCase : int = 1320
elif "yolos_s" in yolos_name:
UpperCAmelCase : Union[str, Any] = 384
UpperCAmelCase : Dict = 1536
UpperCAmelCase : str = 12
UpperCAmelCase : List[str] = 6
elif "yolos_b" in yolos_name:
UpperCAmelCase : int = [800, 1344]
UpperCAmelCase : Optional[int] = 91
UpperCAmelCase : int = "huggingface/label-files"
UpperCAmelCase : Union[str, Any] = "coco-detection-id2label.json"
UpperCAmelCase : Optional[Any] = json.load(open(hf_hub_download(__magic_name__ , __magic_name__ , repo_type="dataset" ) , "r" ) )
UpperCAmelCase : str = {int(__magic_name__ ): v for k, v in idalabel.items()}
UpperCAmelCase : str = idalabel
UpperCAmelCase : Union[str, Any] = {v: k for k, v in idalabel.items()}
return config
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ = False ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCAmelCase : Tuple = state_dict.pop(F"blocks.{i}.attn.qkv.weight" )
UpperCAmelCase : List[Any] = state_dict.pop(F"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase : str = in_proj_weight[: config.hidden_size, :]
UpperCAmelCase : Optional[int] = in_proj_bias[: config.hidden_size]
UpperCAmelCase : Optional[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCAmelCase : int = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCAmelCase : str = in_proj_weight[-config.hidden_size :, :]
UpperCAmelCase : Tuple = in_proj_bias[-config.hidden_size :]
def lowercase ( __magic_name__ ):
'''simple docstring'''
if "backbone" in name:
UpperCAmelCase : int = name.replace("backbone" , "vit" )
if "cls_token" in name:
UpperCAmelCase : Dict = name.replace("cls_token" , "embeddings.cls_token" )
if "det_token" in name:
UpperCAmelCase : int = name.replace("det_token" , "embeddings.detection_tokens" )
if "mid_pos_embed" in name:
UpperCAmelCase : Tuple = name.replace("mid_pos_embed" , "encoder.mid_position_embeddings" )
if "pos_embed" in name:
UpperCAmelCase : int = name.replace("pos_embed" , "embeddings.position_embeddings" )
if "patch_embed.proj" in name:
UpperCAmelCase : str = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "blocks" in name:
UpperCAmelCase : Tuple = name.replace("blocks" , "encoder.layer" )
if "attn.proj" in name:
UpperCAmelCase : Tuple = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
UpperCAmelCase : Any = name.replace("attn" , "attention.self" )
if "norm1" in name:
UpperCAmelCase : int = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
UpperCAmelCase : List[str] = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
UpperCAmelCase : List[str] = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
UpperCAmelCase : Dict = name.replace("mlp.fc2" , "output.dense" )
if "class_embed" in name:
UpperCAmelCase : Any = name.replace("class_embed" , "class_labels_classifier" )
if "bbox_embed" in name:
UpperCAmelCase : Optional[int] = name.replace("bbox_embed" , "bbox_predictor" )
if "vit.norm" in name:
UpperCAmelCase : Tuple = name.replace("vit.norm" , "vit.layernorm" )
return name
def lowercase ( __magic_name__ , __magic_name__ ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
UpperCAmelCase : Optional[int] = orig_state_dict.pop(__magic_name__ )
if "qkv" in key:
UpperCAmelCase : str = key.split("." )
UpperCAmelCase : List[Any] = int(key_split[2] )
UpperCAmelCase : int = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
UpperCAmelCase : Optional[int] = val[:dim, :]
UpperCAmelCase : Union[str, Any] = val[
dim : dim * 2, :
]
UpperCAmelCase : Any = val[-dim:, :]
else:
UpperCAmelCase : Tuple = val[:dim]
UpperCAmelCase : List[str] = val[dim : dim * 2]
UpperCAmelCase : Any = val[-dim:]
else:
UpperCAmelCase : Union[str, Any] = val
return orig_state_dict
def lowercase ( ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCAmelCase : Tuple = Image.open(requests.get(__magic_name__ , stream=__magic_name__ ).raw )
return im
@torch.no_grad()
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = False ):
'''simple docstring'''
UpperCAmelCase : Tuple = get_yolos_config(__magic_name__ )
# load original state_dict
UpperCAmelCase : int = torch.load(__magic_name__ , map_location="cpu" )["model"]
# load 🤗 model
UpperCAmelCase : int = YolosForObjectDetection(__magic_name__ )
model.eval()
UpperCAmelCase : Dict = convert_state_dict(__magic_name__ , __magic_name__ )
model.load_state_dict(__magic_name__ )
# Check outputs on an image, prepared by YolosImageProcessor
UpperCAmelCase : Dict = 800 if yolos_name != "yolos_ti" else 512
UpperCAmelCase : int = YolosImageProcessor(format="coco_detection" , size=__magic_name__ )
UpperCAmelCase : List[Any] = image_processor(images=prepare_img() , return_tensors="pt" )
UpperCAmelCase : List[str] = model(**__magic_name__ )
UpperCAmelCase , UpperCAmelCase : Optional[int] = outputs.logits, outputs.pred_boxes
UpperCAmelCase , UpperCAmelCase : Optional[Any] = None, None
if yolos_name == "yolos_ti":
UpperCAmelCase : str = torch.tensor(
[[-3_9.5_0_2_2, -1_1.9_8_2_0, -1_7.6_8_8_8], [-2_9.9_5_7_4, -9.9_7_6_9, -1_7.7_6_9_1], [-4_2.3_2_8_1, -2_0.7_2_0_0, -3_0.6_2_9_4]] )
UpperCAmelCase : Tuple = torch.tensor(
[[0.4_0_2_1, 0.0_8_3_6, 0.7_9_7_9], [0.0_1_8_4, 0.2_6_0_9, 0.0_3_6_4], [0.1_7_8_1, 0.2_0_0_4, 0.2_0_9_5]] )
elif yolos_name == "yolos_s_200_pre":
UpperCAmelCase : Union[str, Any] = torch.tensor(
[[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] )
UpperCAmelCase : List[str] = torch.tensor(
[[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] )
elif yolos_name == "yolos_s_300_pre":
UpperCAmelCase : List[str] = torch.tensor(
[[-3_6.2_2_2_0, -1_4.4_3_8_5, -2_3.5_4_5_7], [-3_5.6_9_7_0, -1_4.7_5_8_3, -2_1.3_9_3_5], [-3_1.5_9_3_9, -1_3.6_0_4_2, -1_6.8_0_4_9]] )
UpperCAmelCase : Dict = torch.tensor(
[[0.7_6_1_4, 0.2_3_1_6, 0.4_7_2_8], [0.7_1_6_8, 0.4_4_9_5, 0.3_8_5_5], [0.4_9_9_6, 0.1_4_6_6, 0.9_9_9_6]] )
elif yolos_name == "yolos_s_dWr":
UpperCAmelCase : Dict = torch.tensor(
[[-4_2.8_6_6_8, -2_4.1_0_4_9, -4_1.1_6_9_0], [-3_4.7_4_5_6, -1_4.1_2_7_4, -2_4.9_1_9_4], [-3_3.7_8_9_8, -1_2.1_9_4_6, -2_5.6_4_9_5]] )
UpperCAmelCase : List[Any] = torch.tensor(
[[0.5_5_8_7, 0.2_7_7_3, 0.0_6_0_5], [0.5_0_0_4, 0.3_0_1_4, 0.9_9_9_4], [0.4_9_9_9, 0.1_5_4_8, 0.9_9_9_4]] )
elif yolos_name == "yolos_base":
UpperCAmelCase : str = torch.tensor(
[[-4_0.6_0_6_4, -2_4.3_0_8_4, -3_2.6_4_4_7], [-5_5.1_9_9_0, -3_0.7_7_1_9, -3_5.5_8_7_7], [-5_1.4_3_1_1, -3_3.3_5_0_7, -3_5.6_4_6_2]] )
UpperCAmelCase : Union[str, Any] = torch.tensor(
[[0.5_5_5_5, 0.2_7_9_4, 0.0_6_5_5], [0.9_0_4_9, 0.2_6_6_4, 0.1_8_9_4], [0.9_1_8_3, 0.1_9_8_4, 0.1_6_3_5]] )
else:
raise ValueError(F"Unknown yolos_name: {yolos_name}" )
assert torch.allclose(logits[0, :3, :3] , __magic_name__ , atol=1e-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , __magic_name__ , atol=1e-4 )
Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ )
print(F"Saving model {yolos_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(__magic_name__ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(__magic_name__ )
if push_to_hub:
UpperCAmelCase : int = {
"yolos_ti": "yolos-tiny",
"yolos_s_200_pre": "yolos-small",
"yolos_s_300_pre": "yolos-small-300",
"yolos_s_dWr": "yolos-small-dwr",
"yolos_base": "yolos-base",
}
print("Pushing to the hub..." )
UpperCAmelCase : Tuple = model_mapping[yolos_name]
image_processor.push_to_hub(__magic_name__ , organization="hustvl" )
model.push_to_hub(__magic_name__ , organization="hustvl" )
if __name__ == "__main__":
a : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--yolos_name",
default="yolos_s_200_pre",
type=str,
help=(
"Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
" 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
),
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
a : str = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
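# Example invocation (script name and paths are placeholders; the flags are
# the ones defined above):
#   python convert_yolos_to_pytorch.py \
#       --yolos_name yolos_s_200_pre \
#       --checkpoint_path /path/to/yolos_s_200_pre.pth \
#       --pytorch_dump_folder_path ./yolos-small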
| 311
| 0
|
"""simple docstring"""
def solution(n: int = 1_000) -> int:
    '''simple docstring'''
    a : int = 3
    result : int = 0
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        elif a % 15 == 0:  # unreachable: any multiple of 15 already matched the branch above
            result -= a
        a += 1
    return result
if __name__ == "__main__":
print(f'''{solution() = }''')
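# Quick check: for n = 10 the multiples of 3 or 5 below 10 are 3, 5, 6 and 9,
# so solution(10) should return 3 + 5 + 6 + 9 == 23.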
| 40
|
'''simple docstring'''
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
a : Tuple = logging.getLogger(__name__)
def lowercase ( ):
'''simple docstring'''
UpperCAmelCase : Any = argparse.ArgumentParser(
description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)." )
parser.add_argument("--file_path" , type=__magic_name__ , default="data/dump.txt" , help="The path to the data." )
parser.add_argument("--tokenizer_type" , type=__magic_name__ , default="bert" , choices=["bert", "roberta", "gpt2"] )
parser.add_argument("--tokenizer_name" , type=__magic_name__ , default="bert-base-uncased" , help="The tokenizer to use." )
parser.add_argument("--dump_file" , type=__magic_name__ , default="data/dump" , help="The dump file prefix." )
UpperCAmelCase : List[Any] = parser.parse_args()
logger.info(F"Loading Tokenizer ({args.tokenizer_name})" )
if args.tokenizer_type == "bert":
UpperCAmelCase : Any = BertTokenizer.from_pretrained(args.tokenizer_name )
UpperCAmelCase : Optional[int] = tokenizer.special_tokens_map["cls_token"] # `[CLS]`
UpperCAmelCase : Any = tokenizer.special_tokens_map["sep_token"] # `[SEP]`
elif args.tokenizer_type == "roberta":
UpperCAmelCase : List[Any] = RobertaTokenizer.from_pretrained(args.tokenizer_name )
UpperCAmelCase : Tuple = tokenizer.special_tokens_map["cls_token"] # `<s>`
UpperCAmelCase : Optional[int] = tokenizer.special_tokens_map["sep_token"] # `</s>`
elif args.tokenizer_type == "gpt2":
UpperCAmelCase : List[str] = GPTaTokenizer.from_pretrained(args.tokenizer_name )
UpperCAmelCase : Optional[Any] = tokenizer.special_tokens_map["bos_token"] # `<|endoftext|>`
UpperCAmelCase : List[Any] = tokenizer.special_tokens_map["eos_token"] # `<|endoftext|>`
logger.info(F"Loading text from {args.file_path}" )
with open(args.file_path , "r" , encoding="utf8" ) as fp:
UpperCAmelCase : str = fp.readlines()
logger.info("Start encoding" )
logger.info(F"{len(__magic_name__ )} examples to process." )
UpperCAmelCase : int = []
UpperCAmelCase : int = 0
UpperCAmelCase : Union[str, Any] = 1_0000
UpperCAmelCase : Union[str, Any] = time.time()
for text in data:
UpperCAmelCase : Dict = F"{bos} {text.strip()} {sep}"
UpperCAmelCase : Tuple = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
rslt.append(__magic_name__ )
iter += 1
if iter % interval == 0:
UpperCAmelCase : Dict = time.time()
logger.info(F"{iter} examples processed. - {(end-start):.2f}s/{interval}expl" )
UpperCAmelCase : Any = time.time()
logger.info("Finished binarization" )
logger.info(F"{len(__magic_name__ )} examples processed." )
UpperCAmelCase : str = F"{args.dump_file}.{args.tokenizer_name}.pickle"
UpperCAmelCase : List[str] = tokenizer.vocab_size
if vocab_size < (1 << 16):
UpperCAmelCase : int = [np.uintaa(__magic_name__ ) for d in rslt]
else:
UpperCAmelCase : int = [np.intaa(__magic_name__ ) for d in rslt]
random.shuffle(rslt_ )
logger.info(F"Dump to {dp_file}" )
with open(__magic_name__ , "wb" ) as handle:
pickle.dump(rslt_ , __magic_name__ , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
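# Example invocation (script name and paths are placeholders; the flags are
# the ones defined above):
#   python binarized_data.py \
#       --file_path data/dump.txt \
#       --tokenizer_type bert \
#       --tokenizer_name bert-base-uncased \
#       --dump_file data/binarized_text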
| 311
| 0
|