| code (string, 86–54.5k chars) | code_codestyle (int64, 0–371) | style_context (string, 87–49.2k chars) | style_context_codestyle (int64, 0–349) | label (int64, 0–1) |
|---|---|---|---|---|
import tensorflow as tf
from ...tf_utils import shape_list
class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer):
    def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order

        self.out_layers = []
        self.out_projs = []

    def build(self, input_shape):
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed), initializer="zeros", trainable=True, name="cluster_weight"
            )
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,), initializer="zeros", trainable=True, name="cluster_bias"
            )

        if self.div_val == 1:
            for i in range(len(self.cutoffs)):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj),
                        initializer="zeros",
                        trainable=True,
                        name=f"out_projs_._{i}",
                    )
                    self.out_projs.append(weight)
                else:
                    self.out_projs.append(None)
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(self.vocab_size,),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)
                proj = self.add_weight(
                    shape=(d_emb_i, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}"
                )
                self.out_projs.append(proj)
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(r_idx - l_idx,),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        super().build(input_shape)

    @staticmethod
    def _logit(x, W, b, proj=None):
        y = x
        if proj is not None:
            y = tf.einsum("ibd,ed->ibe", y, proj)
        return tf.einsum("ibd,nd->ibn", y, W) + b

    @staticmethod
    def _gather_logprob(logprob, target):
        lp_size = shape_list(logprob)
        r = tf.range(lp_size[0], dtype=target.dtype)
        idx = tf.stack([r, target], 1)
        return tf.gather_nd(logprob, idx)

    def call(self, hidden, target, return_mean=True, training=False):
        head_logprob = 0
        if self.n_clusters == 0:
            output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
            out = tf.nn.log_softmax(output, axis=-1)
        else:
            hidden_sizes = shape_list(hidden)
            out = []
            loss = tf.zeros(hidden_sizes[:2])
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask)
                    cur_target = tf.boolean_mask(target, mask) - l_idx

                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]

                if i == 0:
                    cur_W = tf.concat([cur_W, self.cluster_weight], 0)
                    cur_b = tf.concat([cur_b, self.cluster_bias], 0)

                    head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
                    head_logprob = tf.nn.log_softmax(head_logit)
                    out.append(head_logprob[..., : self.cutoffs[0]])
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
                else:
                    tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
                    tail_logprob = tf.nn.log_softmax(tail_logit)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i)
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss))
            out = tf.concat(out, axis=-1)

        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss)
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss)
            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference).
            self.add_metric(loss, name=self.name, aggregation="mean" if return_mean else "")

        return out
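A minimal usage sketch of the layer above (sizes are illustrative, not from the source): hidden states follow the `ibd` einsum convention, i.e. (seq_len, batch, d_proj), and the call returns log-probabilities over the full vocabulary while accumulating the masked cross-entropy via `add_loss`.

# Sketch only: a vocab of 10 split into a head of 5 plus one tail cluster.
layer = TFAdaptiveSoftmaxMask(vocab_size=10, d_embed=8, d_proj=8, cutoffs=[5])
hidden = tf.random.normal((3, 2, 8))  # (seq_len, batch, d_proj)
target = tf.random.uniform((3, 2), maxval=10, dtype=tf.int64)
log_probs = layer(hidden, target)  # (seq_len, batch, vocab_size) log-probabilities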
| 304
|
# Hand-tuned denoising timestep schedules, descending from 999 to 0.
fast27_timesteps = [
    999, 800, 799, 600, 599, 500, 400, 399, 377, 355, 333, 311, 288, 266,
    244, 222, 200, 199, 177, 155, 133, 111, 88, 66, 44, 22, 0,
]

smart27_timesteps = [
    999, 976, 952, 928, 905, 882, 858, 857, 810, 762, 715, 714, 572, 429,
    428, 286, 285, 238, 190, 143, 142, 118, 95, 71, 47, 24, 0,
]

smart50_timesteps = [
    999, 988, 977, 966, 955, 944, 933, 922, 911, 900, 899, 879, 859, 840,
    820, 800, 799, 766, 733, 700, 699, 650, 600, 599, 500, 499, 400, 399,
    350, 300, 299, 266, 233, 200, 199, 179, 159, 140, 120, 100, 99, 88,
    77, 66, 55, 44, 33, 22, 11, 0,
]

smart100_timesteps = [
    999, 995, 992, 989, 985, 981, 978, 975, 971, 967, 964, 961, 957, 956,
    951, 947, 942, 937, 933, 928, 923, 919, 914, 913, 908, 903, 897, 892,
    887, 881, 876, 871, 870, 864, 858, 852, 846, 840, 834, 828, 827, 820,
    813, 806, 799, 792, 785, 784, 777, 770, 763, 756, 749, 742, 741, 733,
    724, 716, 707, 699, 698, 688, 677, 666, 656, 655, 645, 634, 623, 613,
    612, 598, 584, 570, 569, 555, 541, 527, 526, 505, 484, 483, 462, 440,
    439, 396, 395, 352, 351, 308, 307, 264, 263, 220, 219, 176, 132, 88,
    44, 0,
]

smart185_timesteps = [
    999, 997, 995, 992, 990, 988, 986, 984, 981, 979, 977, 975, 972, 970,
    968, 966, 964, 961, 959, 957, 956, 954, 951, 949, 946, 944, 941, 939,
    936, 934, 931, 929, 926, 924, 921, 919, 916, 914, 913, 910, 907, 905,
    902, 899, 896, 893, 891, 888, 885, 882, 879, 877, 874, 871, 870, 867,
    864, 861, 858, 855, 852, 849, 846, 843, 840, 837, 834, 831, 828, 827,
    824, 821, 817, 814, 811, 808, 804, 801, 798, 795, 791, 788, 785, 784,
    780, 777, 774, 770, 766, 763, 760, 756, 752, 749, 746, 742, 741, 737,
    733, 730, 726, 722, 718, 714, 710, 707, 703, 699, 698, 694, 690, 685,
    681, 677, 673, 669, 664, 660, 656, 655, 650, 646, 641, 636, 632, 627,
    622, 618, 613, 612, 607, 602, 596, 591, 586, 580, 575, 570, 569, 563,
    557, 551, 545, 539, 533, 527, 526, 519, 512, 505, 498, 491, 484, 483,
    474, 466, 457, 449, 440, 439, 428, 418, 407, 396, 395, 381, 366, 352,
    351, 330, 308, 307, 286, 264, 263, 242, 220, 219, 176, 175, 132, 131,
    88, 44, 0,
]

super27_timesteps = [
    999, 991, 982, 974, 966, 958, 950, 941, 933, 925, 916, 908, 900, 899,
    874, 850, 825, 800, 799, 700, 600, 500, 400, 300, 200, 100, 0,
]

super40_timesteps = [
    999, 992, 985, 978, 971, 964, 957, 949, 942, 935, 928, 921, 914, 907,
    900, 899, 879, 859, 840, 820, 800, 799, 766, 733, 700, 699, 650, 600,
    599, 500, 499, 400, 399, 300, 299, 200, 199, 100, 99, 0,
]

super100_timesteps = [
    999, 996, 992, 989, 985, 982, 979, 975, 972, 968, 965, 961, 958, 955,
    951, 948, 944, 941, 938, 934, 931, 927, 924, 920, 917, 914, 910, 907,
    903, 900, 899, 891, 884, 876, 869, 861, 853, 846, 838, 830, 823, 815,
    808, 800, 799, 788, 777, 766, 755, 744, 733, 722, 711, 700, 699, 688,
    677, 666, 655, 644, 633, 622, 611, 600, 599, 585, 571, 557, 542, 528,
    514, 500, 499, 485, 471, 457, 442, 428, 414, 400, 399, 379, 359, 340,
    320, 300, 299, 279, 259, 240, 220, 200, 199, 166, 133, 100, 99, 66,
    33, 0,
]
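The list lengths (27/27/50/100/185/27/40/100) match the hand-tuned DeepFloyd IF schedules, which are meant to be passed to a pipeline in place of a uniform step count. A hedged sketch of that usage (pipeline name and `timesteps` argument follow the diffusers IF API as I understand it, not this file):

# from diffusers import IFPipeline
# pipe = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0")
# image = pipe("a photo of a red panda", timesteps=fast27_timesteps).images[0]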
| 304
| 1
|
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
logger = logging.get_logger(__name__)


class DPTFeatureExtractor(DPTImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
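Since the wrapper only adds a deprecation warning, migrating is a drop-in rename; a minimal sketch (the checkpoint name is illustrative):

from transformers import DPTImageProcessor

# Preferred replacement for the deprecated feature extractor.
image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-large")  # illustrative checkpoint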
| 304
|
from __future__ import annotations
import numpy as np
def relu(vector: list[float]) -> np.ndarray:
    """Apply the ReLU activation element-wise: max(0, x) for each entry."""
    return np.maximum(0, vector)


if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0 0 5]
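ReLU is applied element-wise, so the same call works on arrays of any shape; for example, on a 2x3 matrix:

x = np.array([[-2.0, 0.5, -0.1], [3.0, -4.0, 0.0]])
print(relu(x))  # negative entries become 0: [[0. 0.5 0.], [3. 0. 0.]]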
| 304
| 1
|
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
lowerCAmelCase = datasets.utils.logging.get_logger(__name__)
@dataclass
class ParquetConfig(datasets.BuilderConfig):
    """BuilderConfig for Parquet."""

    batch_size: int = 10_000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None


class Parquet(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = ParquetConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"
                )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)
                    ):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                    raise
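For context, this builder is what the public API dispatches to for parquet files; a minimal sketch (the file path is illustrative):

import datasets

# load_dataset("parquet", ...) routes through the Parquet builder above.
ds = datasets.load_dataset("parquet", data_files={"train": "data/train.parquet"})  # illustrative path
print(ds["train"].features)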
| 304
|
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2 ** power (Project Euler problem 16)."""
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0

    for i in list_num:
        sum_of_num += int(i)

    return sum_of_num


if __name__ == "__main__":
    power = int(input("Enter the power of 2: ").strip())
    print("2 ^ ", power, " = ", 2**power)
    result = solution(power)
    print("Sum of the digits is: ", result)
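A sanity check small enough to verify by hand, plus the published Project Euler answer for the default input:

assert solution(15) == 26  # 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26
assert solution() == 1366  # Project Euler problem 16 answer for 2**1000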
| 304
| 1
|
from collections import defaultdict
class AssignmentUsingBitmask:
    def __init__(self, task_performed, total):
        self.total_tasks = total  # total no of tasks (N)

        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))
        ]

        self.task = defaultdict(list)  # stores the list of persons for each task

        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed)) - 1

    def count_ways_until(self, mask, task_no):
        # if mask == self.final_mask all persons are distributed tasks, return 1
        if mask == self.final_mask:
            return 1
        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]

        # Number of ways when we do not include this task in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1)

        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue
                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)

        # save the value.
        self.dp[mask][task_no] = total_ways_util

        return self.dp[mask][task_no]

    def count_no_of_ways(self, task_performed):
        # Store the list of persons for each task
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)

        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)


if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)

    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
    print(
        AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
            task_performed
        )
    )
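Brute force confirms the printed answer of 10 for the sample input: persons 0, 1, 2 can take tasks {1, 3, 4}, {1, 2, 5}, {3, 4}, and exactly 10 triples use pairwise-distinct tasks.

from itertools import product

count = sum(1 for t in product([1, 3, 4], [1, 2, 5], [3, 4]) if len(set(t)) == 3)
assert count == 10  # matches count_no_of_ways for the sample above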
| 304
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id


logger = logging.getLogger(__name__)


@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")
        results.update(result)

    return results


if __name__ == "__main__":
    main()
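The metric computation is easy to check in isolation; a small stand-in for EvalPrediction (attributes assumed to mirror it) shows what compute_metrics returns:

import numpy as np

class FakeEvalPrediction:  # stand-in exposing the two attributes compute_metrics reads
    predictions = np.array([[0.1, 0.9], [0.8, 0.2]])
    label_ids = np.array([1, 1])

preds = np.argmax(FakeEvalPrediction.predictions, axis=1)  # -> [1, 0]
print({"acc": (preds == FakeEvalPrediction.label_ids).mean()})  # {'acc': 0.5}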
| 304
| 1
|
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class PriorTransformerTests(ModelTesterMixin, unittest.TestCase):
    model_class = PriorTransformer
    main_input_name = "hidden_states"

    @property
    def dummy_input(self):
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def get_dummy_seed_input(self, seed=0):
        torch.manual_seed(seed)
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    @property
    def input_shape(self):
        return (4, 8)

    @property
    def output_shape(self):
        return (4, 8)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "num_attention_heads": 2,
            "attention_head_dim": 4,
            "num_layers": 2,
            "embedding_dim": 8,
            "num_embeddings": 7,
            "additional_embeddings": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_from_pretrained_hub(self):
        model, loading_info = PriorTransformer.from_pretrained(
            "hf-internal-testing/prior-dummy", output_loading_info=True
        )
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        hidden_states = model(**self.dummy_input)[0]

        assert hidden_states is not None, "Make sure output is not None"

    def test_forward_signature(self):
        init_dict, _ = self.prepare_init_args_and_inputs_for_common()

        model = self.model_class(**init_dict)
        signature = inspect.signature(model.forward)
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]

        expected_arg_names = ["hidden_states", "timestep"]
        self.assertListEqual(arg_names[:2], expected_arg_names)

    def test_output_pretrained(self):
        model = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy")
        model = model.to(torch_device)

        if hasattr(model, "set_default_attn_processor"):
            model.set_default_attn_processor()

        input = self.get_dummy_seed_input()

        with torch.no_grad():
            output = model(**input)[0]

        output_slice = output[0, :5].flatten().cpu()
        print(output_slice)

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        expected_output_slice = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239])
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))


@slow
class PriorTransformerIntegrationTests(unittest.TestCase):
    def get_dummy_seed_input(self, batch_size=1, embedding_dim=768, num_embeddings=77, seed=0):
        torch.manual_seed(seed)

        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
            [37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
            # fmt: on
        ]
    )
    def test_kandinsky_prior(self, seed, expected_slice):
        model = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior", subfolder="prior")
        model.to(torch_device)
        input = self.get_dummy_seed_input(seed=seed)

        with torch.no_grad():
            sample = model(**input)[0]

        assert list(sample.shape) == [1, 768]

        output_slice = sample[0, :8].flatten().cpu()
        print(output_slice)
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
| 304
|
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class CvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))


class CvtModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        embed_dim=[16, 48, 96],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        stride_kv=[2, 2, 2],
        cls_token=[False, False, True],
        attention_drop_rate=[0.0, 0.0, 0.0],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return CvtConfig(
            image_size=self.image_size,
            num_labels=self.num_labels,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            num_heads=self.num_heads,
            patch_sizes=self.patch_sizes,
            patch_padding=self.patch_padding,
            patch_stride=self.patch_stride,
            stride_kv=self.stride_kv,
            depth=self.depth,
            cls_token=self.cls_token,
            attention_drop_rate=self.attention_drop_rate,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = CvtModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = CvtForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class CvtModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": CvtModel, "image-classification": CvtForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = CvtModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Cvt does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Cvt does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Cvt does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    """Load the COCO test fixture image used by the integration test below."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class CvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9285, 0.9015, -0.3150]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 304
| 1
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_xmod": [
        "XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XmodConfig",
        "XmodOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xmod"] = [
        "XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XmodForCausalLM",
        "XmodForMaskedLM",
        "XmodForMultipleChoice",
        "XmodForQuestionAnswering",
        "XmodForSequenceClassification",
        "XmodForTokenClassification",
        "XmodModel",
        "XmodPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xmod import (
            XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
            XmodForCausalLM,
            XmodForMaskedLM,
            XmodForMultipleChoice,
            XmodForQuestionAnswering,
            XmodForSequenceClassification,
            XmodForTokenClassification,
            XmodModel,
            XmodPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
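What the `_LazyModule` indirection buys: importing the package only registers the import table above, and nothing torch-backed loads until an attribute is first touched. A sketch of the access pattern (not part of this file):

# Importing the package is cheap; only the import table is registered.
import transformers.models.xmod as xmod

# The first attribute access triggers the real import of modeling_xmod.
model_cls = xmod.XmodModel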
| 304
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mra"] = [
        "MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MraForMaskedLM",
        "MraForMultipleChoice",
        "MraForQuestionAnswering",
        "MraForSequenceClassification",
        "MraForTokenClassification",
        "MraLayer",
        "MraModel",
        "MraPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mra import (
            MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
            MraLayer,
            MraModel,
            MraPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 304
| 1
|
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt",
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt",
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt,
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
| 304
|
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = "<<<<<<< This should probably be modified because it mentions: "
HIGHLIGHT_MESSAGE_POST = "=======\n>>>>>>>\n"

TO_HIGHLIGHT = [
    "TextEncoderConfig",
    "ByteTextEncoder",
    "SubwordTextEncoder",
    "encoder_config",
    "maybe_build_from_corpus",
    "manual_dir",
]

TO_CONVERT = [
    # (pattern, replacement)
    # Order is important here for some replacements
    (r"tfds\.core", r"datasets"),
    (r"tf\.io\.gfile\.GFile", r"open"),
    (r"tf\.([\w\d]+)", r"datasets.Value('\1')"),
    (r"tfds\.features\.Text\(\)", r"datasets.Value('string')"),
    (r"tfds\.features\.Text\(", r"datasets.Value('string'),"),
    (r"features\s*=\s*tfds.features.FeaturesDict\(", r"features=datasets.Features("),
    (r"tfds\.features\.FeaturesDict\(", r"dict("),
    (r"The TensorFlow Datasets Authors", r"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
    (r"tfds\.", r"datasets."),
    (r"dl_manager\.manual_dir", r"self.config.data_dir"),
    (r"self\.builder_config", r"self.config"),
]


def convert_command_factory(args: Namespace):
    """Factory used by the argument parser to instantiate the command from CLI arguments."""
    return ConvertCommand(args.tfds_path, args.datasets_directory)


class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
        )
        train_parser.add_argument(
            "--tfds_path",
            type=str,
            required=True,
            help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
        )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder."
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory

    def run(self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")

        abs_datasets_path = os.path.abspath(self._datasets_directory)

        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")

        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}

        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]

        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)

            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue

            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()

            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line

                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)

                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    out_line = "from . import " + match.group(1)

                # Check we have not forgotten anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}")

                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)

            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f"Adding directory {output_dir}")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)

            if needs_manual_update:
                with_manual_update.append(output_file)

            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f"Converted in {output_file}")

        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {dest_folder} to {utils_file}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")

        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'."
                )
| 304
| 1
|
from collections import deque
from .hash_table import HashTable
class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
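A minimal usage sketch (the constructor signature and the insert_data method are assumed from the sibling HashTable module, which is not shown here):

table = HashTableWithLinkedList(3, charge_factor=2)  # size_table=3; signature assumed
for value in (10, 20, 30, 40):
    table.insert_data(value)  # colliding keys chain new entries into a per-slot deque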
| 304
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
    "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
    "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
    "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
    "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
    "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
    "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
    "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}


class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
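A short usage sketch of the two classes above (the OnnxConfig constructor signature is assumed from its base class):

config = AlbertConfig()  # defaults above are albert-xxlarge sized: hidden_size=4096, 12 layers
onnx_config = AlbertOnnxConfig(config)
print(list(onnx_config.inputs))  # ['input_ids', 'attention_mask', 'token_type_ids']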
| 304
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_megatron_bert"] = [
        "MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegatronBertForCausalLM",
        "MegatronBertForMaskedLM",
        "MegatronBertForMultipleChoice",
        "MegatronBertForNextSentencePrediction",
        "MegatronBertForPreTraining",
        "MegatronBertForQuestionAnswering",
        "MegatronBertForSequenceClassification",
        "MegatronBertForTokenClassification",
        "MegatronBertModel",
        "MegatronBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_megatron_bert import (
            MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegatronBertForCausalLM,
            MegatronBertForMaskedLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
            MegatronBertModel,
            MegatronBertPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 304
|
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
def _lowerCamelCase( lowercase__ ) -> Dict:
'''simple docstring'''
if "visual_encoder" in key:
__lowercase= re.sub('visual_encoder*' , 'vision_model.encoder' , lowercase__ )
if "blocks" in key:
__lowercase= re.sub(R'blocks' , 'layers' , lowercase__ )
if "attn" in key:
__lowercase= re.sub(R'attn' , 'self_attn' , lowercase__ )
if "norm1" in key:
__lowercase= re.sub(R'norm1' , 'layer_norm1' , lowercase__ )
if "norm2" in key:
__lowercase= re.sub(R'norm2' , 'layer_norm2' , lowercase__ )
if "encoder.norm" in key:
__lowercase= re.sub(R'encoder.norm' , 'post_layernorm' , lowercase__ )
if "encoder.patch_embed.proj" in key:
__lowercase= re.sub(R'encoder.patch_embed.proj' , 'embeddings.patch_embedding' , lowercase__ )
if "encoder.pos_embed" in key:
__lowercase= re.sub(R'encoder.pos_embed' , 'embeddings.position_embedding' , lowercase__ )
if "encoder.cls_token" in key:
__lowercase= re.sub(R'encoder.cls_token' , 'embeddings.class_embedding' , lowercase__ )
if "self_attn" in key:
__lowercase= re.sub(R'self_attn.proj' , 'self_attn.projection' , lowercase__ )
return key
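# Example (added, illustrative): assuming the helper above is `rename_key`, as
# the call sites below suggest, a hypothetical checkpoint key is remapped like
#   'visual_encoder.blocks.0.attn.proj.weight'
#     -> 'vision_model.encoder.layers.0.self_attn.projection.weight'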
@torch.no_grad()
def _lowerCamelCase( lowercase__ , lowercase__=None ) -> int:
'''simple docstring'''
if config_path is not None:
__lowercase= BlipConfig.from_pretrained(lowercase__ )
else:
__lowercase= BlipConfig(projection_dim=5_1_2 , text_config={} , vision_config={} )
__lowercase= BlipForConditionalGeneration(lowercase__ ).eval()
__lowercase= 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'
__lowercase= blip_decoder(pretrained=lowercase__ , image_size=3_8_4 , vit='base' )
__lowercase= pt_model.eval()
__lowercase= pt_model.state_dict()
for key in modified_state_dict.copy():
__lowercase= modified_state_dict.pop(lowercase__ )
__lowercase= rename_key(lowercase__ )
__lowercase= value
hf_model.load_state_dict(lowercase__ )
__lowercase= 3_8_4
__lowercase= load_demo_image(image_size=lowercase__ , device='cpu' )
__lowercase= BertTokenizer.from_pretrained('bert-base-uncased' )
__lowercase= tokenizer(['a picture of'] ).input_ids
__lowercase= hf_model.generate(lowercase__ , lowercase__ )
assert out[0].tolist() == [3_0_5_2_2, 1_0_3_7, 3_8_6_1, 1_9_9_7, 1_0_3_7, 2_4_5_0, 3_5_6_4, 2_0_0_6, 1_9_9_6, 3_5_0_9, 2_0_0_7, 2_0_1_4, 3_8_9_9, 1_0_2]
__lowercase= hf_model.generate(lowercase__ )
assert out[0].tolist() == [3_0_5_2_2, 1_0_3_7, 2_4_5_0, 3_5_6_4, 2_0_0_6, 1_9_9_6, 3_5_0_9, 2_0_0_7, 2_0_1_4, 3_8_9_9, 1_0_2]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(lowercase__ )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
__lowercase= (
'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'
)
__lowercase= blip_vqa(pretrained=lowercase__ , image_size=lowercase__ , vit='base' )
vqa_model.eval()
__lowercase= vqa_model.state_dict()
for key in modified_state_dict.copy():
__lowercase= modified_state_dict.pop(lowercase__ )
__lowercase= rename_key(lowercase__ )
__lowercase= value
__lowercase= BlipForQuestionAnswering(lowercase__ )
hf_vqa_model.load_state_dict(lowercase__ )
__lowercase= ['How many dogs are in this image?']
__lowercase= tokenizer(lowercase__ , return_tensors='pt' ).input_ids
__lowercase= hf_vqa_model.generate(lowercase__ , lowercase__ )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '_vqa' )
__lowercase= 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'
__lowercase= blip_itm(pretrained=lowercase__ , image_size=lowercase__ , vit='base' )
itm_model.eval()
__lowercase= itm_model.state_dict()
for key in modified_state_dict.copy():
__lowercase= modified_state_dict.pop(lowercase__ )
__lowercase= rename_key(lowercase__ )
__lowercase= value
__lowercase= BlipForImageTextRetrieval(lowercase__ )
__lowercase= ['A picture of a woman with a dog sitting in a beach']
__lowercase= tokenizer(
lowercase__ , return_tensors='pt' , padding='max_length' , truncation=lowercase__ , max_length=3_5 , ).input_ids
hf_itm_model.load_state_dict(lowercase__ )
hf_itm_model.eval()
__lowercase= hf_itm_model(lowercase__ , lowercase__ , use_itm_head=lowercase__ )
__lowercase= hf_itm_model(lowercase__ , lowercase__ , use_itm_head=lowercase__ )
assert out[0].item() == 0.2110_6874_9427_7954
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_5698_8453_8650_5127
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + '_itm' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    args = parser.parse_args()
    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class A ( A_ ):
def __get__(self , lowerCAmelCase , lowerCAmelCase=None ):
# See docs.python.org/3/howto/descriptor.html#properties
if obj is None:
return self
if self.fget is None:
raise AttributeError('unreadable attribute' )
__lowercase= '__cached_' + self.fget.__name__
__lowercase= getattr(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
if cached is None:
__lowercase= self.fget(lowerCAmelCase )
setattr(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
return cached
def _lowerCamelCase( lowercase__ ) -> Optional[Any]:
'''simple docstring'''
__lowercase= val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(F'invalid truth value {val!r}' )
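# Illustration (added): the truth-value parser above mirrors distutils'
# strtobool -- e.g. 'YES' -> 1, 'off' -> 0, anything else raises ValueError.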
def _lowerCamelCase( lowercase__ ) -> Tuple:
'''simple docstring'''
if is_torch_fx_proxy(lowercase__ ):
return True
if is_torch_available():
import torch
if isinstance(lowercase__ , torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(lowercase__ , tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(lowercase__ , (jnp.ndarray, Tracer) ):
return True
return isinstance(lowercase__ , np.ndarray )
def _lowerCamelCase( lowercase__ ) -> List[Any]:
'''simple docstring'''
return isinstance(lowercase__ , np.ndarray )
def _lowerCamelCase( lowercase__ ) -> Optional[Any]:
'''simple docstring'''
return _is_numpy(lowercase__ )
def _lowerCamelCase( lowercase__ ) -> List[Any]:
'''simple docstring'''
import torch
return isinstance(lowercase__ , torch.Tensor )
def _lowerCamelCase( lowercase__ ) -> Tuple:
'''simple docstring'''
return False if not is_torch_available() else _is_torch(lowercase__ )
def _lowerCamelCase( lowercase__ ) -> Dict:
'''simple docstring'''
import torch
return isinstance(lowercase__ , torch.device )
def _lowerCamelCase( lowercase__ ) -> Any:
'''simple docstring'''
return False if not is_torch_available() else _is_torch_device(lowercase__ )
def _lowerCamelCase( lowercase__ ) -> Dict:
'''simple docstring'''
import torch
if isinstance(lowercase__ , lowercase__ ):
if hasattr(lowercase__ , lowercase__ ):
__lowercase= getattr(lowercase__ , lowercase__ )
else:
return False
return isinstance(lowercase__ , torch.dtype )
def _lowerCamelCase( lowercase__ ) -> List[str]:
'''simple docstring'''
return False if not is_torch_available() else _is_torch_dtype(lowercase__ )
def _lowerCamelCase( lowercase__ ) -> Tuple:
'''simple docstring'''
import tensorflow as tf
return isinstance(lowercase__ , tf.Tensor )
def _lowerCamelCase( lowercase__ ) -> Optional[int]:
'''simple docstring'''
return False if not is_tf_available() else _is_tensorflow(lowercase__ )
def _lowerCamelCase( lowercase__ ) -> Tuple:
'''simple docstring'''
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(lowercase__ , 'is_symbolic_tensor' ):
return tf.is_symbolic_tensor(lowercase__ )
return type(lowercase__ ) == tf.Tensor
def _lowerCamelCase( lowercase__ ) -> Optional[int]:
'''simple docstring'''
return False if not is_tf_available() else _is_tf_symbolic_tensor(lowercase__ )
def _lowerCamelCase( lowercase__ ) -> Optional[Any]:
'''simple docstring'''
import jax.numpy as jnp # noqa: F811
return isinstance(lowercase__ , jnp.ndarray )
def _lowerCamelCase( lowercase__ ) -> int:
'''simple docstring'''
return False if not is_flax_available() else _is_jax(lowercase__ )
def _lowerCamelCase( lowercase__ ) -> Optional[int]:
'''simple docstring'''
if isinstance(lowercase__ , (dict, UserDict) ):
return {k: to_py_obj(lowercase__ ) for k, v in obj.items()}
elif isinstance(lowercase__ , (list, tuple) ):
return [to_py_obj(lowercase__ ) for o in obj]
elif is_tf_tensor(lowercase__ ):
return obj.numpy().tolist()
elif is_torch_tensor(lowercase__ ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(lowercase__ ):
return np.asarray(lowercase__ ).tolist()
elif isinstance(lowercase__ , (np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
def _lowerCamelCase( lowercase__ ) -> List[Any]:
'''simple docstring'''
if isinstance(lowercase__ , (dict, UserDict) ):
return {k: to_numpy(lowercase__ ) for k, v in obj.items()}
elif isinstance(lowercase__ , (list, tuple) ):
return np.array(lowercase__ )
elif is_tf_tensor(lowercase__ ):
return obj.numpy()
elif is_torch_tensor(lowercase__ ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(lowercase__ ):
return np.asarray(lowercase__ )
else:
return obj
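# Example (added, illustrative): both converters above recurse through dicts,
# lists and tuples before falling back on the object itself, e.g.
#   {'ids': torch.tensor([[1, 2]])}  ->  {'ids': [[1, 2]]}      # py-obj variant
#   (1, 2, 3)                        ->  np.array([1, 2, 3])    # numpy variant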
class A ( A_ ):
def _A (self ):
__lowercase= fields(self )
# Safety and consistency checks
if not len(lowerCAmelCase ):
raise ValueError(f'{self.__class__.__name__} has no fields.' )
if not all(field.default is None for field in class_fields[1:] ):
raise ValueError(f'{self.__class__.__name__} should not have more than one required field.' )
__lowercase= getattr(self , class_fields[0].name )
__lowercase= all(getattr(self , field.name ) is None for field in class_fields[1:] )
if other_fields_are_none and not is_tensor(lowerCAmelCase ):
if isinstance(lowerCAmelCase , lowerCAmelCase ):
__lowercase= first_field.items()
__lowercase= True
else:
try:
__lowercase= iter(lowerCAmelCase )
__lowercase= True
except TypeError:
__lowercase= False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(lowerCAmelCase ):
if (
not isinstance(lowerCAmelCase , (list, tuple) )
or not len(lowerCAmelCase ) == 2
or not isinstance(element[0] , lowerCAmelCase )
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
__lowercase= first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
f'Cannot set key/value for {element}. It needs to be a tuple (key, value).' )
break
setattr(self , element[0] , element[1] )
if element[1] is not None:
__lowercase= element[1]
elif first_field is not None:
__lowercase= first_field
else:
for field in class_fields:
__lowercase= getattr(self , field.name )
if v is not None:
__lowercase= v
def __delitem__(self , *lowerCAmelCase , **lowerCAmelCase ):
raise Exception(f'You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.' )
def _A (self , *lowerCAmelCase , **lowerCAmelCase ):
raise Exception(f'You cannot use ``setdefault`` on a {self.__class__.__name__} instance.' )
def _A (self , *lowerCAmelCase , **lowerCAmelCase ):
raise Exception(f'You cannot use ``pop`` on a {self.__class__.__name__} instance.' )
def _A (self , *lowerCAmelCase , **lowerCAmelCase ):
raise Exception(f'You cannot use ``update`` on a {self.__class__.__name__} instance.' )
def __getitem__(self , lowerCAmelCase ):
if isinstance(lowerCAmelCase , lowerCAmelCase ):
__lowercase= dict(self.items() )
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__(self , lowerCAmelCase , lowerCAmelCase ):
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(lowerCAmelCase , lowerCAmelCase )
super().__setattr__(lowerCAmelCase , lowerCAmelCase )
def __setitem__(self , lowerCAmelCase , lowerCAmelCase ):
# Will raise a KeyException if needed
super().__setitem__(lowerCAmelCase , lowerCAmelCase )
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(lowerCAmelCase , lowerCAmelCase )
def _A (self ):
return tuple(self[k] for k in self.keys() )
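# Usage sketch (added): the model-output base class above acts as a dataclass
# and an ordered mapping at once -- attribute access, string indexing and
# `to_tuple()` all reach the same stored values, while `pop`, `update`,
# `setdefault` and `__delitem__` are deliberately blocked.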
class A ( A_ , A_ ):
@classmethod
def _A (cls , lowerCAmelCase ):
raise ValueError(
            f'{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys() )}' )
class A ( A_ ):
UpperCamelCase_ : str ='''longest'''
UpperCamelCase_ : Optional[Any] ='''max_length'''
UpperCamelCase_ : Optional[Any] ='''do_not_pad'''
class A ( A_ ):
UpperCamelCase_ : List[str] ='''pt'''
UpperCamelCase_ : int ='''tf'''
UpperCamelCase_ : List[str] ='''np'''
UpperCamelCase_ : Optional[Any] ='''jax'''
class A :
def __init__(self , lowerCAmelCase ):
__lowercase= context_managers
__lowercase= ExitStack()
def __enter__(self ):
for context_manager in self.context_managers:
self.stack.enter_context(lowerCAmelCase )
def __exit__(self , *lowerCAmelCase , **lowerCAmelCase ):
self.stack.__exit__(*lowerCAmelCase , **lowerCAmelCase )
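# Usage sketch (added; the class name is masked above, so `ContextManagers` is
# an assumption): it enters a variable number of context managers as one block,
#   with ContextManagers([open('a.txt'), open('b.txt')]):
#       ...
# which is equivalent to nesting the corresponding `with` statements.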
def _lowerCamelCase( lowercase__ ) -> List[Any]:
'''simple docstring'''
__lowercase= infer_framework(lowercase__ )
if framework == "tf":
__lowercase= inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
__lowercase= inspect.signature(model_class.forward ) # PyTorch models
else:
__lowercase= inspect.signature(model_class.__call__ ) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
def _lowerCamelCase( lowercase__ ) -> Tuple:
'''simple docstring'''
__lowercase= model_class.__name__
__lowercase= infer_framework(lowercase__ )
if framework == "tf":
__lowercase= inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
__lowercase= inspect.signature(model_class.forward ) # PyTorch models
else:
__lowercase= inspect.signature(model_class.__call__ ) # Flax models
if "QuestionAnswering" in model_name:
return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
else:
return [p for p in signature.parameters if "label" in p]
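# Illustration (added): for a PyTorch question-answering model the label finder
# above returns ['start_positions', 'end_positions']; for a plain sequence
# classifier it returns ['labels'].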
def _lowerCamelCase( lowercase__ , lowercase__ = "" , lowercase__ = "." ) -> str:
'''simple docstring'''
def _flatten_dict(lowercase__ , lowercase__="" , lowercase__="." ):
for k, v in d.items():
__lowercase= str(lowercase__ ) + delimiter + str(lowercase__ ) if parent_key else k
if v and isinstance(lowercase__ , lowercase__ ):
yield from flatten_dict(lowercase__ , lowercase__ , delimiter=lowercase__ ).items()
else:
yield key, v
return dict(_flatten_dict(lowercase__ , lowercase__ , lowercase__ ) )
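# Example (added, illustrative; `flatten_dict` is the name the recursive call
# above uses): with the default '.' delimiter,
#   flatten_dict({'a': {'b': 1}, 'c': 2})  ->  {'a.b': 1, 'c': 2}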
@contextmanager
def _lowerCamelCase( lowercase__ , lowercase__ = False ) -> int:
'''simple docstring'''
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def _lowerCamelCase( lowercase__ , lowercase__=None ) -> Any:
'''simple docstring'''
if is_numpy_array(lowercase__ ):
return np.transpose(lowercase__ , axes=lowercase__ )
elif is_torch_tensor(lowercase__ ):
return array.T if axes is None else array.permute(*lowercase__ )
elif is_tf_tensor(lowercase__ ):
import tensorflow as tf
return tf.transpose(lowercase__ , perm=lowercase__ )
elif is_jax_tensor(lowercase__ ):
return jnp.transpose(lowercase__ , axes=lowercase__ )
else:
raise ValueError(F'Type not supported for transpose: {type(lowercase__ )}.' )
def _lowerCamelCase( lowercase__ , lowercase__ ) -> Union[str, Any]:
'''simple docstring'''
if is_numpy_array(lowercase__ ):
return np.reshape(lowercase__ , lowercase__ )
elif is_torch_tensor(lowercase__ ):
return array.reshape(*lowercase__ )
elif is_tf_tensor(lowercase__ ):
import tensorflow as tf
return tf.reshape(lowercase__ , lowercase__ )
elif is_jax_tensor(lowercase__ ):
return jnp.reshape(lowercase__ , lowercase__ )
else:
raise ValueError(F'Type not supported for reshape: {type(lowercase__ )}.' )
def _lowerCamelCase( lowercase__ , lowercase__=None ) -> Union[str, Any]:
'''simple docstring'''
if is_numpy_array(lowercase__ ):
return np.squeeze(lowercase__ , axis=lowercase__ )
elif is_torch_tensor(lowercase__ ):
return array.squeeze() if axis is None else array.squeeze(dim=lowercase__ )
elif is_tf_tensor(lowercase__ ):
import tensorflow as tf
return tf.squeeze(lowercase__ , axis=lowercase__ )
elif is_jax_tensor(lowercase__ ):
return jnp.squeeze(lowercase__ , axis=lowercase__ )
else:
raise ValueError(F'Type not supported for squeeze: {type(lowercase__ )}.' )
def _lowerCamelCase( lowercase__ , lowercase__ ) -> List[str]:
'''simple docstring'''
if is_numpy_array(lowercase__ ):
return np.expand_dims(lowercase__ , lowercase__ )
elif is_torch_tensor(lowercase__ ):
return array.unsqueeze(dim=lowercase__ )
elif is_tf_tensor(lowercase__ ):
import tensorflow as tf
return tf.expand_dims(lowercase__ , axis=lowercase__ )
elif is_jax_tensor(lowercase__ ):
return jnp.expand_dims(lowercase__ , axis=lowercase__ )
else:
raise ValueError(F'Type not supported for expand_dims: {type(lowercase__ )}.' )
def _lowerCamelCase( lowercase__ ) -> str:
'''simple docstring'''
if is_numpy_array(lowercase__ ):
return np.size(lowercase__ )
elif is_torch_tensor(lowercase__ ):
return array.numel()
elif is_tf_tensor(lowercase__ ):
import tensorflow as tf
return tf.size(lowercase__ )
elif is_jax_tensor(lowercase__ ):
return array.size
else:
        raise ValueError(F'Type not supported for size: {type(lowercase__ )}.' )
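# Usage sketch (added): the five helpers above share one dispatch pattern --
# probe the array type, then delegate to numpy, torch, tensorflow or jax, e.g.
#   transpose(np.ones((2, 3))).shape      ->  (3, 2)
#   expand_dims(torch.ones(4), 0).shape   ->  (1, 4)
# (the conventional transformers names are assumed; the defs above are masked).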
def _lowerCamelCase( lowercase__ , lowercase__ ) -> List[Any]:
'''simple docstring'''
for key, value in auto_map.items():
if isinstance(lowercase__ , (tuple, list) ):
__lowercase= [F'{repo_id}--{v}' if (v is not None and '--' not in v) else v for v in value]
elif value is not None and "--" not in value:
__lowercase= F'{repo_id}--{value}'
return auto_map
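# Example (added, illustrative, hypothetical repo id): with
#   auto_map = {'AutoModel': 'modeling.MyModel'} and repo_id = 'user/repo'
# the function above rewrites the entry to 'user/repo--modeling.MyModel' and
# leaves values that already contain '--' untouched.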
def _lowerCamelCase( lowercase__ ) -> Union[str, Any]:
'''simple docstring'''
for base_class in inspect.getmro(lowercase__ ):
__lowercase= base_class.__module__
__lowercase= base_class.__name__
if module.startswith('tensorflow' ) or module.startswith('keras' ) or name == "TFPreTrainedModel":
return "tf"
elif module.startswith('torch' ) or name == "PreTrainedModel":
return "pt"
elif module.startswith('flax' ) or module.startswith('jax' ) or name == "FlaxPreTrainedModel":
return "flax"
else:
raise TypeError(F'Could not infer framework from class {model_class}.' )
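# Illustration (added): because the MRO is walked, subclasses are classified by
# their framework base -- any torch.nn.Module subclass maps to 'pt', a
# keras/TensorFlow model to 'tf', and flax/jax modules to 'flax'.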
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class A :
UpperCamelCase_ : int
UpperCamelCase_ : Node | None
class A :
def __init__(self , lowerCAmelCase ):
__lowercase= None
for i in sorted(lowerCAmelCase , reverse=lowerCAmelCase ):
__lowercase= Node(lowerCAmelCase , self.head )
def __iter__(self ):
__lowercase= self.head
while node:
yield node.data
__lowercase= node.next_node
def __len__(self ):
return sum(1 for _ in self )
def __str__(self ):
return " -> ".join([str(lowerCAmelCase ) for node in self] )
def _lowerCamelCase( lowercase__ , lowercase__ ) -> SortedLinkedList:
'''simple docstring'''
return SortedLinkedList(list(lowercase__ ) + list(lowercase__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
    SSL = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup
lowerCAmelCase = {
'''User-Agent''': '''Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'''
''' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582'''
}
def _lowerCamelCase( lowercase__ = "dhaka" , lowercase__ = 5 ) -> int:
'''simple docstring'''
__lowercase= min(lowercase__ , 5_0 ) # Prevent abuse!
__lowercase= {
'q': query,
'tbm': 'isch',
'hl': 'en',
'ijn': '0',
}
__lowercase= requests.get('https://www.google.com/search' , params=lowercase__ , headers=lowercase__ )
__lowercase= BeautifulSoup(html.text , 'html.parser' )
__lowercase= ''.join(
re.findall(R'AF_initDataCallback\(([^<]+)\);' , str(soup.select('script' ) ) ) )
__lowercase= json.dumps(lowercase__ )
__lowercase= json.loads(lowercase__ )
__lowercase= re.findall(
R'\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",' , lowercase__ , )
if not matched_google_image_data:
return 0
__lowercase= re.sub(
R'\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]' , '' , str(lowercase__ ) , )
__lowercase= re.findall(
R'(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]' , lowercase__ , )
for index, fixed_full_res_image in enumerate(lowercase__ ):
if index >= max_images:
return index
__lowercase= bytes(lowercase__ , 'ascii' ).decode(
'unicode-escape' )
__lowercase= bytes(lowercase__ , 'ascii' ).decode(
'unicode-escape' )
__lowercase= urllib.request.build_opener()
__lowercase= [
(
'User-Agent',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582',
)
]
urllib.request.install_opener(lowercase__ )
__lowercase= F'query_{query.replace(" " , "_" )}'
if not os.path.exists(lowercase__ ):
os.makedirs(lowercase__ )
urllib.request.urlretrieve( # noqa: S310
lowercase__ , F'{path_name}/original_size_img_{index}.jpg' )
return index
if __name__ == "__main__":
try:
        image_count = download_images_from_google_query(sys.argv[1])
print(F'{image_count} images were downloaded to disk.')
except IndexError:
print('''Please provide a search term.''')
raise
from __future__ import annotations
from collections.abc import Callable
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__ = 1_0_0 , ) -> float:
'''simple docstring'''
__lowercase= x_start
__lowercase= fnc(lowercase__ )
__lowercase= 0.0
for _ in range(lowercase__ ):
# Approximates small segments of curve as linear and solve
# for trapezoidal area
__lowercase= (x_end - x_start) / steps + xa
__lowercase= fnc(lowercase__ )
area += abs(fxa + fxa ) * (xa - xa) / 2
# Increment step
__lowercase= xa
__lowercase= fxa
return area
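# Worked check (added): the loop takes |f| segment-wise, so it approximates the
# unsigned area. For f(x) = x^2 * (x + 1), which is negative on (-5, -1),
#   area = -int_{-5}^{-1} f dx + int_{-1}^{5} f dx = 1376/12 + 2376/12 = 938/3 ~ 312.67,
# the value the printed approximations converge to as the step count grows.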
if __name__ == "__main__":
def _lowerCamelCase( lowercase__ ) -> Dict:
'''simple docstring'''
return x**3 + x**2
print('''f(x) = x^3 + x^2''')
    print('''The area between the curve y = x^3 + x^2, the lines x = -5 and x = 5, and the x axis is:''')
    i = 1_0
while i <= 1_0_0_0_0_0:
print(F'with {i} steps: {trapezoidal_area(f, -5, 5, i)}')
i *= 1_0
from typing import List
import numpy as np
def _lowerCamelCase( lowercase__ ) -> int:
'''simple docstring'''
__lowercase= {key: len(lowercase__ ) for key, value in gen_kwargs.items() if isinstance(lowercase__ , lowercase__ )}
if len(set(lists_lengths.values() ) ) > 1:
raise RuntimeError(
(
'Sharding is ambiguous for this dataset: '
+ 'we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n'
+ '\n'.join(F'\t- key {key} has length {length}' for key, length in lists_lengths.items() )
+ '\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, '
+ 'and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.'
) )
__lowercase= max(lists_lengths.values() , default=0 )
return max(1 , lowercase__ )
def _lowerCamelCase( lowercase__ , lowercase__ ) -> List[range]:
'''simple docstring'''
__lowercase= []
for group_idx in range(lowercase__ ):
__lowercase= num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
if num_shards_to_add == 0:
break
__lowercase= shards_indices_per_group[-1].stop if shards_indices_per_group else 0
__lowercase= range(lowercase__ , start + num_shards_to_add )
shards_indices_per_group.append(lowercase__ )
return shards_indices_per_group
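# Example (added, illustrative; `_distribute_shards` is the name the call
# below uses): 5 shards over at most 3 jobs gives contiguous, balanced ranges,
#   _distribute_shards(num_shards=5, max_num_jobs=3)
#     ->  [range(0, 2), range(2, 4), range(4, 5)]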
def _lowerCamelCase( lowercase__ , lowercase__ ) -> List[dict]:
'''simple docstring'''
__lowercase= _number_of_shards_in_gen_kwargs(lowercase__ )
if num_shards == 1:
return [dict(lowercase__ )]
else:
__lowercase= _distribute_shards(num_shards=lowercase__ , max_num_jobs=lowercase__ )
return [
{
key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
if isinstance(lowercase__ , lowercase__ )
else value
for key, value in gen_kwargs.items()
}
for group_idx in range(len(lowercase__ ) )
]
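# Example (added, illustrative): with gen_kwargs = {'files': ['a', 'b', 'c'],
# 'split': 'train'} and max_num_jobs=2 the splitter above yields
#   [{'files': ['a', 'b'], 'split': 'train'}, {'files': ['c'], 'split': 'train'}]
# -- only list values are sharded; scalars are copied into every group.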
def _lowerCamelCase( lowercase__ ) -> dict:
'''simple docstring'''
return {
key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
if isinstance(gen_kwargs_list[0][key] , lowercase__ )
else gen_kwargs_list[0][key]
for key in gen_kwargs_list[0]
}
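# Example (added, illustrative): the merger above inverts the split -- given
#   [{'files': ['a', 'b'], 'split': 'train'}, {'files': ['c'], 'split': 'train'}]
# it rebuilds {'files': ['a', 'b', 'c'], 'split': 'train'}, concatenating list
# values and taking scalars from the first group.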
def _lowerCamelCase( lowercase__ , lowercase__ ) -> dict:
'''simple docstring'''
__lowercase= {len(lowercase__ ) for value in gen_kwargs.values() if isinstance(lowercase__ , lowercase__ )}
__lowercase= {}
for size in list_sizes:
__lowercase= list(range(lowercase__ ) )
rng.shuffle(indices_per_size[size] )
# Now let's copy the gen_kwargs and shuffle the lists based on their sizes
__lowercase= dict(lowercase__ )
for key, value in shuffled_kwargs.items():
if isinstance(lowercase__ , lowercase__ ):
__lowercase= [value[i] for i in indices_per_size[len(lowercase__ )]]
return shuffled_kwargs
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class A :
def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=2 , lowerCAmelCase=9_9 , lowerCAmelCase=0 , lowerCAmelCase=3_2 , lowerCAmelCase=5 , lowerCAmelCase=4 , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_1_2 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=2 , lowerCAmelCase=4 , lowerCAmelCase="last" , lowerCAmelCase=True , lowerCAmelCase=None , lowerCAmelCase=0 , ):
__lowercase= parent
__lowercase= batch_size
__lowercase= seq_length
__lowercase= is_training
__lowercase= use_input_lengths
__lowercase= use_token_type_ids
__lowercase= use_labels
__lowercase= gelu_activation
__lowercase= sinusoidal_embeddings
__lowercase= causal
__lowercase= asm
__lowercase= n_langs
__lowercase= vocab_size
__lowercase= n_special
__lowercase= hidden_size
__lowercase= num_hidden_layers
__lowercase= num_attention_heads
__lowercase= hidden_dropout_prob
__lowercase= attention_probs_dropout_prob
__lowercase= max_position_embeddings
__lowercase= type_sequence_label_size
__lowercase= initializer_range
__lowercase= num_labels
__lowercase= num_choices
__lowercase= summary_type
__lowercase= use_proj
__lowercase= scope
__lowercase= bos_token_id
def _A (self ):
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase= random_attention_mask([self.batch_size, self.seq_length] )
__lowercase= None
if self.use_input_lengths:
__lowercase= (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__lowercase= None
if self.use_token_type_ids:
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__lowercase= None
__lowercase= None
__lowercase= None
if self.use_labels:
__lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase= ids_tensor([self.batch_size] , 2 ).float()
__lowercase= ids_tensor([self.batch_size] , self.num_choices )
__lowercase= self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _A (self ):
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , lengths=lowerCAmelCase , langs=lowerCAmelCase )
__lowercase= model(lowerCAmelCase , langs=lowerCAmelCase )
__lowercase= model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMWithLMHeadModel(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMForQuestionAnsweringSimple(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase )
__lowercase= model(lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase )
__lowercase= outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMForQuestionAnswering(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase )
__lowercase= model(
lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase , cls_index=lowerCAmelCase , is_impossible=lowerCAmelCase , p_mask=lowerCAmelCase , )
__lowercase= model(
lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase , cls_index=lowerCAmelCase , is_impossible=lowerCAmelCase , )
((__lowercase), )= result_with_labels.to_tuple()
__lowercase= model(lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase )
((__lowercase), )= result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMForSequenceClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase )
__lowercase= model(lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= self.num_labels
__lowercase= XLMForTokenClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= self.num_choices
__lowercase= XLMForMultipleChoice(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase= token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase= input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase= model(
lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _A (self ):
__lowercase= self.prepare_config_and_inputs()
(
(
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
),
)= config_and_inputs
__lowercase= {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class A ( A_ , A_ , A_ , unittest.TestCase ):
UpperCamelCase_ : int =(
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
UpperCamelCase_ : Dict =(
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
UpperCamelCase_ : str =(
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False ):
__lowercase= super()._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
__lowercase= torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase )
__lowercase= torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase )
return inputs_dict
def _A (self ):
__lowercase= XLMModelTester(self )
__lowercase= ConfigTester(self , config_class=lowerCAmelCase , emb_dim=3_7 )
def _A (self ):
self.config_tester.run_common_tests()
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False , lowerCAmelCase=1 ):
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
self.assertListEqual(
[isinstance(lowerCAmelCase , lowerCAmelCase ) for iter_attentions in attentions] , [True] * len(lowerCAmelCase ) )
self.assertEqual(len(lowerCAmelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(lowerCAmelCase ):
# adds PAD dummy token
__lowercase= min_length + idx + 1
__lowercase= min_length + idx + 1
__lowercase= (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(lowerCAmelCase ) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False , lowerCAmelCase=1 ):
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
self.assertListEqual(
[isinstance(lowerCAmelCase , lowerCAmelCase ) for iter_hidden_states in hidden_states] , [True] * len(lowerCAmelCase ) , )
self.assertEqual(len(lowerCAmelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(lowerCAmelCase ):
# adds PAD dummy token
__lowercase= min_length + idx + 1
__lowercase= (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(lowerCAmelCase ) , )
pass
@slow
def _A (self ):
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase= XLMModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
@require_torch
class A ( unittest.TestCase ):
@slow
def _A (self ):
__lowercase= XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' )
model.to(lowerCAmelCase )
__lowercase= torch.tensor([[1_4, 4_4_7]] , dtype=torch.long , device=lowerCAmelCase ) # the president
__lowercase= [
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
__lowercase= model.generate(lowerCAmelCase , do_sample=lowerCAmelCase )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , lowerCAmelCase )
import os
def _lowerCamelCase( ) -> List[str]:
'''simple docstring'''
    with open(os.path.dirname(__file__ ) + '/p022_names.txt' ) as file:
__lowercase= str(file.readlines()[0] )
__lowercase= names.replace('"' , '' ).split(',' )
names.sort()
__lowercase= 0
__lowercase= 0
for i, name in enumerate(lowercase__ ):
for letter in name:
            name_score += ord(letter ) - 6_4
total_score += (i + 1) * name_score
__lowercase= 0
return total_score
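# Worked example (added): 'COLIN' scores 3 + 15 + 12 + 9 + 14 = 53; at position
# 938 in the sorted list it contributes 938 * 53 = 49714 to the total (the
# example from Project Euler problem 22 itself).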
if __name__ == "__main__":
print(solution())
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
lowerCAmelCase = {'''UserAgent''': UserAgent().random}
def _lowerCamelCase( lowercase__ ) -> dict:
'''simple docstring'''
__lowercase= script.contents[0]
__lowercase= json.loads(data[data.find('{"config"' ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class A :
def __init__(self , lowerCAmelCase ):
__lowercase= f'https://www.instagram.com/{username}/'
__lowercase= self.get_json()
def _A (self ):
__lowercase= requests.get(self.url , headers=lowerCAmelCase ).text
__lowercase= BeautifulSoup(lowerCAmelCase , 'html.parser' ).find_all('script' )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__(self ):
return f'{self.__class__.__name__}(\'{self.username}\')'
def __str__(self ):
return f'{self.fullname} ({self.username}) is {self.biography}'
@property
def _A (self ):
return self.user_data["username"]
@property
def _A (self ):
return self.user_data["full_name"]
@property
def _A (self ):
return self.user_data["biography"]
@property
def _A (self ):
return self.user_data["business_email"]
@property
def _A (self ):
return self.user_data["external_url"]
@property
def _A (self ):
return self.user_data["edge_followed_by"]["count"]
@property
def _A (self ):
return self.user_data["edge_follow"]["count"]
@property
def _A (self ):
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def _A (self ):
return self.user_data["profile_pic_url_hd"]
@property
def _A (self ):
return self.user_data["is_verified"]
@property
def _A (self ):
return self.user_data["is_private"]
def _lowerCamelCase( lowercase__ = "github" ) -> None:
'''simple docstring'''
import os
if os.environ.get('CI' ):
return # test failing on GitHub Actions
__lowercase= InstagramUser(lowercase__ )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , lowercase__ )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 1_5_0
assert instagram_user.number_of_followers > 1_2_0_0_0_0
assert instagram_user.number_of_followings > 1_5
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('https://instagram.' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
    instagram_user = InstagramUser('''github''')
print(instagram_user)
print(F'{instagram_user.number_of_posts = }')
print(F'{instagram_user.number_of_followers = }')
print(F'{instagram_user.number_of_followings = }')
print(F'{instagram_user.email = }')
print(F'{instagram_user.website = }')
print(F'{instagram_user.profile_picture_url = }')
print(F'{instagram_user.is_verified = }')
print(F'{instagram_user.is_private = }')
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_trajectory_transformer''': [
'''TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TrajectoryTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_trajectory_transformer'''] = [
'''TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TrajectoryTransformerModel''',
'''TrajectoryTransformerPreTrainedModel''',
'''load_tf_weights_in_trajectory_transformer''',
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
from typing import Any
import numpy as np
def _lowerCamelCase( lowercase__ ) -> bool:
'''simple docstring'''
return np.array_equal(lowercase__ , matrix.conjugate().T )
def _lowerCamelCase( lowercase__ , lowercase__ ) -> Any:
'''simple docstring'''
__lowercase= v.conjugate().T
__lowercase= v_star.dot(lowercase__ )
assert isinstance(lowercase__ , np.ndarray )
return (v_star_dot.dot(lowercase__ )) / (v_star.dot(lowercase__ ))
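# Worked note (added): for Hermitian A the Rayleigh quotient
#   R(A, v) = (v* A v) / (v* v)
# is always real and satisfies lambda_min <= R <= lambda_max. For the second
# matrix in the tests below with v = (1, 2, 3): v* A v = 42, v* v = 14, R = 3.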
def _lowerCamelCase( ) -> None:
'''simple docstring'''
__lowercase= np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
__lowercase= np.array([[1], [2], [3]] )
assert is_hermitian(lowercase__ ), F'{a} is not hermitian.'
print(rayleigh_quotient(lowercase__ , lowercase__ ) )
__lowercase= np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
assert is_hermitian(lowercase__ ), F'{a} is not hermitian.'
assert rayleigh_quotient(lowercase__ , lowercase__ ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
lowerCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCAmelCase = '''
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
>>> pipe_prior.to("cuda")
>>> prompt = "red cat, 4k photo"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")
>>> pipe.to("cuda")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save("cat.png")
```
'''
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__=8 ) -> Tuple:
'''simple docstring'''
__lowercase= height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
__lowercase= width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
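# Worked example (added; `downscale_height_and_width` is the name the call in
# __call__ uses): with the MoVQ scale factor 8 a pixel size maps to the latent
# grid times a single factor of 8, rounding partial latents up:
#   downscale_height_and_width(768, 768, 8)  ->  (96, 96)
#   downscale_height_and_width(700, 700, 8)  ->  (88, 88)   # 700 // 64 = 10, +1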
class A ( A_ ):
def __init__(self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
super().__init__()
self.register_modules(
unet=lowerCAmelCase , scheduler=lowerCAmelCase , movq=lowerCAmelCase , )
__lowercase= 2 ** (len(self.movq.config.block_out_channels ) - 1)
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
if latents is None:
__lowercase= randn_tensor(lowerCAmelCase , generator=lowerCAmelCase , device=lowerCAmelCase , dtype=lowerCAmelCase )
else:
if latents.shape != shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
__lowercase= latents.to(lowerCAmelCase )
__lowercase= latents * scheduler.init_noise_sigma
return latents
def _A (self , lowerCAmelCase=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
__lowercase= torch.device(f'cuda:{gpu_id}' )
__lowercase= [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowerCAmelCase , lowerCAmelCase )
def _A (self , lowerCAmelCase=0 ):
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
__lowercase= torch.device(f'cuda:{gpu_id}' )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=lowerCAmelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
__lowercase= None
for cpu_offloaded_model in [self.unet, self.movq]:
__lowercase, __lowercase= cpu_offload_with_hook(lowerCAmelCase , lowerCAmelCase , prev_module_hook=lowerCAmelCase )
# We'll offload the last model manually.
__lowercase= hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _A (self ):
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowerCAmelCase , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(lowerCAmelCase )
def __call__(self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = 5_1_2 , lowerCAmelCase = 5_1_2 , lowerCAmelCase = 1_0_0 , lowerCAmelCase = 4.0 , lowerCAmelCase = 1 , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = "pil" , lowerCAmelCase = True , ):
__lowercase= self._execution_device
__lowercase= guidance_scale > 1.0
if isinstance(lowerCAmelCase , lowerCAmelCase ):
__lowercase= torch.cat(lowerCAmelCase , dim=0 )
__lowercase= image_embeds.shape[0] * num_images_per_prompt
if isinstance(lowerCAmelCase , lowerCAmelCase ):
__lowercase= torch.cat(lowerCAmelCase , dim=0 )
if do_classifier_free_guidance:
__lowercase= image_embeds.repeat_interleave(lowerCAmelCase , dim=0 )
__lowercase= negative_image_embeds.repeat_interleave(lowerCAmelCase , dim=0 )
__lowercase= torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=lowerCAmelCase )
self.scheduler.set_timesteps(lowerCAmelCase , device=lowerCAmelCase )
__lowercase= self.scheduler.timesteps
__lowercase= self.unet.config.in_channels
__lowercase, __lowercase= downscale_height_and_width(lowerCAmelCase , lowerCAmelCase , self.movq_scale_factor )
# create initial latent
__lowercase= self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , self.scheduler , )
for i, t in enumerate(self.progress_bar(lowerCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
__lowercase= torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__lowercase= {'image_embeds': image_embeds}
__lowercase= self.unet(
sample=lowerCAmelCase , timestep=lowerCAmelCase , encoder_hidden_states=lowerCAmelCase , added_cond_kwargs=lowerCAmelCase , return_dict=lowerCAmelCase , )[0]
if do_classifier_free_guidance:
__lowercase, __lowercase= noise_pred.split(latents.shape[1] , dim=1 )
__lowercase, __lowercase= noise_pred.chunk(2 )
__lowercase, __lowercase= variance_pred.chunk(2 )
__lowercase= noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
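                # (added note) classifier-free guidance: the combined prediction is
                #   eps = eps_uncond + guidance_scale * (eps_text - eps_uncond)
                # and the learned variance from the text branch is re-attached below.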
__lowercase= torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
__lowercase, __lowercase= noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
__lowercase= self.scheduler.step(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , generator=lowerCAmelCase , )[0]
# post-processing
__lowercase= self.movq.decode(lowerCAmelCase , force_not_quantize=lowerCAmelCase )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' )
if output_type in ["np", "pil"]:
__lowercase= image * 0.5 + 0.5
__lowercase= image.clamp(0 , 1 )
__lowercase= image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__lowercase= self.numpy_to_pil(lowerCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCAmelCase )
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
lowerCAmelCase = logging.get_logger(__name__)
class A ( A_ ):
UpperCamelCase_ : Dict =['''audio_values''', '''audio_mask''']
def __init__(self , lowerCAmelCase=2_0_4_8 , lowerCAmelCase=1 , lowerCAmelCase=[1_6, 1_6] , lowerCAmelCase=1_2_8 , lowerCAmelCase=4_4_1_0_0 , lowerCAmelCase=8_6 , lowerCAmelCase=2_0_4_8 , lowerCAmelCase=0.0 , **lowerCAmelCase , ):
super().__init__(
feature_size=lowerCAmelCase , sampling_rate=lowerCAmelCase , padding_value=lowerCAmelCase , **lowerCAmelCase , )
__lowercase= spectrogram_length
__lowercase= num_channels
__lowercase= patch_size
__lowercase= feature_size // self.patch_size[1]
__lowercase= n_fft
__lowercase= sampling_rate // hop_length_to_sampling_rate
__lowercase= sampling_rate
__lowercase= padding_value
__lowercase= mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=lowerCAmelCase , min_frequency=0.0 , max_frequency=2_20_50.0 , sampling_rate=lowerCAmelCase , norm='slaney' , mel_scale='slaney' , ).T
def _A (self , lowerCAmelCase ):
__lowercase= spectrogram(
lowerCAmelCase , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='dB' , db_range=80.0 , )
__lowercase= log_spec[:, :-1]
__lowercase= log_spec - 20.0
__lowercase= np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
return log_spec
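    # Worked note (added): `db_range=80.0` clamps the log-mel dynamic range to
    # 80 dB; after the 20 dB shift and the division by 40, the clip to [-2, 0]
    # followed by +1 guarantees the returned features lie in [-1, 1].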
    def __call__(self , raw_speech , return_tensors = None , return_attention_mask = True , sampling_rate = None , resample = False , mask_audio = False , **kwargs , ):
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    'This feature extractor is set to support sampling rate'
                    f' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'
                    f' with {self.sampling_rate} and not {sampling_rate}.' )
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.' )
        is_batched_numpy = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray([speech] , dtype=np.float32 ).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray ):
            raw_speech = np.asarray(raw_speech , dtype=np.float32 )
        elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech] ).T]
        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0] , list ):
            audio_features = [np.asarray(feature , dtype=np.float32 ) for feature in audio_features]
        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask ).astype(np.float32 )
        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features ), 1, max_time_len, self.feature_size] ).astype(np.float32 )
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features ) ):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature
        # return as BatchFeature
        if return_attention_mask:
            data = {'audio_values': padded_audio_features, 'audio_mask': audio_mask}
        else:
            data = {'audio_values': padded_audio_features}
        encoded_inputs = BatchFeature(data=data , tensor_type=return_tensors )
        return encoded_inputs
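# Minimal usage sketch (hypothetical: `extractor` stands for an instance of the feature
# extractor class above, whose name is anonymized in this snippet):
#   wave = np.zeros(4_4_1_0_0, dtype=np.float32)  # one second of silence at 44.1 kHz
#   enc = extractor(wave, sampling_rate=4_4_1_0_0, return_attention_mask=True)
#   enc["audio_values"].shape  # -> (1, 1, max_time_len, feature_size)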
| 304
| 1
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-classification/requirements.txt''')
MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def pil_loader(path ):
    '''simple docstring'''
    with open(path , 'rb' ) as f:
        im = Image.open(f )
        return im.convert('RGB' )
@dataclass
class DataTrainingArguments:
    dataset_name : Optional[str] =field(
        default=None , metadata={
            '''help''': '''Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).'''
        } , )
    dataset_config_name : Optional[str] =field(
        default=None , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
    train_dir : Optional[str] =field(default=None , metadata={'''help''': '''A folder containing the training data.'''} )
    validation_dir : Optional[str] =field(default=None , metadata={'''help''': '''A folder containing the validation data.'''} )
    train_val_split : Optional[float] =field(
        default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} )
    max_train_samples : Optional[int] =field(
        default=None , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of training examples to this '''
                '''value if set.'''
            )
        } , )
    max_eval_samples : Optional[int] =field(
        default=None , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
                '''value if set.'''
            )
        } , )
    def __post_init__(self ):
        if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
            raise ValueError(
                'You must specify either a dataset name from the hub or a train and/or validation directory.' )
@dataclass
class ModelArguments:
    model_name_or_path : str =field(
        default='''google/vit-base-patch16-224-in21k''' , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} , )
    model_type : Optional[str] =field(
        default=None , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(MODEL_TYPES )} , )
    config_name : Optional[str] =field(
        default=None , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    cache_dir : Optional[str] =field(
        default=None , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from s3'''} )
    model_revision : str =field(
        default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
    image_processor_name : str =field(default=None , metadata={'''help''': '''Name or path of preprocessor config.'''} )
    use_auth_token : bool =field(
        default=False , metadata={
            '''help''': (
                '''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
                '''with private models).'''
            )
        } , )
    ignore_mismatched_sizes : bool =field(
        default=False , metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''} , )
def collate_fn( examples ):
    '''simple docstring'''
    pixel_values = torch.stack([example['pixel_values'] for example in examples] )
    labels = torch.tensor([example['labels'] for example in examples] )
    return {"pixel_values": pixel_values, "labels": labels}
def main( ):
    '''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('run_image_classification' , model_args , data_args )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
    if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task='image-classification' , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files['train'] = os.path.join(data_args.train_dir , '**' )
        if data_args.validation_dir is not None:
            data_files['validation'] = os.path.join(data_args.validation_dir , '**' )
        dataset = load_dataset(
            'imagefolder' , data_files=data_files , cache_dir=model_args.cache_dir , task='image-classification' , )
# If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if 'validation' in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split , float ) and data_args.train_val_split > 0.0:
        split = dataset['train'].train_test_split(data_args.train_val_split )
        dataset['train'] = split['train']
        dataset['validation'] = split['test']
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
    labels = dataset['train'].features['labels'].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels ):
        label2id[label] = str(i )
        id2label[str(i )] = label
# Load the accuracy metric from the datasets package
    metric = evaluate.load('accuracy' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p ):
        return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids )
    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path , num_labels=len(labels ) , label2id=label2id , id2label=id2label , finetuning_task='image-classification' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
__lowercase= image_processor.size['shortest_edge']
else:
__lowercase= (image_processor.size['height'], image_processor.size['width'])
__lowercase= Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
__lowercase= Compose(
[
RandomResizedCrop(lowercase__ ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
__lowercase= Compose(
[
Resize(lowercase__ ),
CenterCrop(lowercase__ ),
ToTensor(),
normalize,
] )
def train_transforms(lowercase__ ):
__lowercase= [
_train_transforms(pil_img.convert('RGB' ) ) for pil_img in example_batch['image']
]
return example_batch
def val_transforms(lowercase__ ):
__lowercase= [_val_transforms(pil_img.convert('RGB' ) ) for pil_img in example_batch['image']]
return example_batch
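    # The two transform functions above are applied lazily via `set_transform` below, so images
    # are decoded and augmented batch-by-batch at access time rather than preprocessed up front.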
if training_args.do_train:
if "train" not in dataset:
raise ValueError('--do_train requires a train dataset' )
        if data_args.max_train_samples is not None:
            dataset['train'] = (
                dataset['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
            )
        # Set the training transforms
        dataset["train"].set_transform(train_transforms )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError('--do_eval requires a validation dataset' )
        if data_args.max_eval_samples is not None:
            dataset['validation'] = (
                dataset['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        dataset["validation"].set_transform(val_transforms )
    # Initialize our trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=dataset['train'] if training_args.do_train else None , eval_dataset=dataset['validation'] if training_args.do_eval else None , compute_metrics=compute_metrics , tokenizer=image_processor , data_collator=collate_fn , )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        trainer.save_model()
        trainer.log_metrics('train' , train_result.metrics )
        trainer.save_metrics('train' , train_result.metrics )
        trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics('eval' , metrics )
        trainer.save_metrics('eval' , metrics )
# Write model card and (optionally) push to hub
    kwargs = {
        'finetuned_from': model_args.model_name_or_path,
        'tasks': 'image-classification',
        'dataset': data_args.dataset_name,
        'tags': ['image-classification', 'vision'],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
if __name__ == "__main__":
main()
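# Typical invocation sketch (the dataset name is illustrative, not prescribed by this script):
#   python run_image_classification.py \
#       --model_name_or_path google/vit-base-patch16-224-in21k \
#       --dataset_name beans --output_dir ./vit-beans --do_train --do_eval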
| 304
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state ):
    '''simple docstring'''
    return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device )
def test_gather(state ):
    '''simple docstring'''
    tensor = create_tensor(state )
    gathered_tensor = gather(tensor )
    assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) )
def test_gather_object(state ):
    '''simple docstring'''
    obj = [state.process_index]
    gathered_obj = gather_object(obj )
    assert len(gathered_obj ) == state.num_processes, f'{gathered_obj}, {len(gathered_obj )} != {state.num_processes}'
    assert gathered_obj == list(range(state.num_processes ) ), f'{gathered_obj} != {list(range(state.num_processes ) )}'
def test_broadcast(state ):
    '''simple docstring'''
    tensor = create_tensor(state )
    broadcasted_tensor = broadcast(tensor )
    assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
    assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) )
def test_pad_across_processes(state ):
    '''simple docstring'''
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1 ).to(state.device )
    else:
        tensor = torch.arange(state.num_processes ).to(state.device )
    padded_tensor = pad_across_processes(tensor )
    assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0]
def test_reduce_sum(state ):
    '''simple docstring'''
    if state.num_processes != 2:
        return
    tensor = create_tensor(state )
    reduced_tensor = reduce(tensor , 'sum' )
    truth_tensor = torch.tensor([4.0, 6] ).to(state.device )
    assert torch.allclose(reduced_tensor , truth_tensor ), f'{reduced_tensor} != {truth_tensor}'
def test_reduce_mean(state ):
    '''simple docstring'''
    if state.num_processes != 2:
        return
    tensor = create_tensor(state )
    reduced_tensor = reduce(tensor , 'mean' )
    truth_tensor = torch.tensor([2.0, 3] ).to(state.device )
    assert torch.allclose(reduced_tensor , truth_tensor ), f'{reduced_tensor} != {truth_tensor}'
def _mp_fn(index ):
    '''simple docstring'''
    main()
def main( ):
    '''simple docstring'''
    state = PartialState()
    state.print(f'State: {state}' )
    state.print('testing gather' )
    test_gather(state )
    state.print('testing gather_object' )
    test_gather_object(state )
    state.print('testing broadcast' )
    test_broadcast(state )
    state.print('testing pad_across_processes' )
    test_pad_across_processes(state )
    state.print('testing reduce_sum' )
    test_reduce_sum(state )
    state.print('testing reduce_mean' )
    test_reduce_mean(state )
if __name__ == "__main__":
    main()
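# Launch sketch (assumption: this test script is intended for `accelerate launch`; the file
# name below is illustrative):
#   accelerate launch --num_processes 2 test_ops.py
# The two reduce tests above only execute when exactly two processes are present.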
| 304
| 1
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput( BaseOutput ):
    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor
class ScoreSdeVeScheduler( SchedulerMixin , ConfigMixin ):
    order = 1
@register_to_config
    def __init__(self , num_train_timesteps = 2_0_0_0 , snr = 0.15 , sigma_min = 0.01 , sigma_max = 13_48.0 , sampling_eps = 1E-5 , correct_steps = 1 , ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max
        # setable values
        self.timesteps = None
        self.set_sigmas(num_train_timesteps , sigma_min , sigma_max , sampling_eps )
    def scale_model_input(self , sample , timestep = None ):
        return sample
    def set_timesteps(self , num_inference_steps , sampling_eps = None , device = None ):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        self.timesteps = torch.linspace(1 , sampling_eps , num_inference_steps , device=device )
    def set_sigmas(self , num_inference_steps , sigma_min = None , sigma_max = None , sampling_eps = None ):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps , sampling_eps )
        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min ) , math.log(sigma_max ) , num_inference_steps ) )
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
    def get_adjacent_sigma(self , timesteps , t ):
        return torch.where(
            timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
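    # Predictor step: one reverse-SDE (ancestral sampling) update from sigma_t down to sigma_{t-1}.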
    def step_pred(self , model_output , timestep , sample , generator = None , return_dict = True , ):
        if self.timesteps is None:
            raise ValueError(
                '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )
        timestep = timestep * torch.ones(
            sample.shape[0] , device=sample.device )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps ) - 1)).long()
        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device )
        sigma = self.discrete_sigmas[timesteps].to(sample.device )
        adjacent_sigma = self.get_adjacent_sigma(timesteps , timestep ).to(sample.device )
        drift = torch.zeros_like(sample )
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5
        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape ) < len(sample.shape ):
            diffusion = diffusion.unsqueeze(-1 )
        drift = drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape , layout=sample.layout , generator=generator , device=sample.device , dtype=sample.dtype )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g
        if not return_dict:
            return (prev_sample, prev_sample_mean)
        return SdeVeOutput(prev_sample=prev_sample , prev_sample_mean=prev_sample_mean )
    def step_correct(self , model_output , sample , generator = None , return_dict = True , ):
        if self.timesteps is None:
            raise ValueError(
                '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )
        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape , layout=sample.layout , generator=generator ).to(sample.device )
        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0] ).to(sample.device )
        # self.repeat_scalar(step_size, sample.shape[0])
        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape ) < len(sample.shape ):
            step_size = step_size.unsqueeze(-1 )
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample )
    def add_noise(self , original_samples , noise , timesteps , ):
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device )
        sigmas = self.discrete_sigmas.to(original_samples.device )[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples ) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples
def __len__(self ):
return self.config.num_train_timesteps
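# Recap of the predictor update implemented in `step_pred` (Song et al. 2021, eqs. 6 and 47):
#   x_{t-1} = x_t + g(t)^2 * score(x_t, t) + g(t) * z,  z ~ N(0, I),  with g(t)^2 = sigma_t^2 - sigma_{t-1}^2.
# The corrector in `step_correct` is annealed Langevin dynamics whose step size is derived
# from the configured signal-to-noise ratio `snr`.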
| 304
| 1
|
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class JsonConfig( datasets.BuilderConfig ):
    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None
class Json( datasets.ArrowBasedBuilder ):
    BUILDER_CONFIG_CLASS = JsonConfig
    def _info(self ):
        if self.config.block_size is not None:
            logger.warning('The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead' )
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                'The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.' )
        if self.config.newlines_in_values is not None:
            raise ValueError('The JSON loader parameter `newlines_in_values` is no longer supported' )
        return datasets.DatasetInfo(features=self.config.features )
    def _split_generators(self , dl_manager ):
        if not self.config.data_files:
            raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}' )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files , (str, list, tuple) ):
            files = data_files
            if isinstance(files , str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files , str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={'files': files} ) )
return splits
    def _cast_table(self , pa_table ):
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features ) - set(pa_table.column_names ):
                type = self.config.features.arrow_schema.field(column_name ).type
                pa_table = pa_table.append_column(column_name , pa.array([None] * len(pa_table ) , type=type ) )
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table , self.config.features.arrow_schema )
        return pa_table
    def _generate_tables(self , files ):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files ) ):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
                    dataset = json.load(f )
                # We keep only the field we are interested in
                dataset = dataset[self.config.field]
                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset , (list, tuple) ):
                    keys = set().union(*[row.keys() for row in dataset] )
                    mapping = {col: [row.get(col ) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping )
                yield file_idx, self._cast_table(pa_table )
            # If the file has one json object per line
            else:
                with open(file , 'rb' ) as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 3_2 , 1_6 << 1_0 )
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else 'strict'
                    )
                    while True:
                        batch = f.read(self.config.chunksize )
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f )
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding , errors=encoding_errors ).encode('utf-8' )
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch ) , read_options=paj.ReadOptions(block_size=block_size ) )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e , pa.ArrowInvalid )
                                        and "straddling" not in str(e )
                                        or block_size > len(batch )
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f'Batch of {len(batch )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.' )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
                                    dataset = json.load(f )
                            except json.JSONDecodeError:
                                logger.error(f'Failed to read file \'{file}\' with error {type(e )}: {e}' )
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset , list ):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset] )
                                    mapping = {col: [row.get(col ) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping )
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f'Failed to read file \'{file}\' with error {type(e )}: {e}' )
                                    raise ValueError(f'Not able to read records in the JSON file at {file}.' ) from None
                                yield file_idx, self._cast_table(pa_table )
                                break
                            else:
                                logger.error(f'Failed to read file \'{file}\' with error {type(e )}: {e}' )
                                raise ValueError(
                                    f'Not able to read records in the JSON file at {file}. '
                                    f'You should probably indicate the field of the JSON file containing your records. '
                                    f'This JSON file contain the following fields: {str(list(dataset.keys() ) )}. '
                                    f'Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ' ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table )
                        batch_idx += 1
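# Chunked-parsing recap: each read is completed to a full line and handed to
# pyarrow.json.read_json; on a "straddling object" ArrowInvalid error the block size is
# doubled (and reset per file) until the largest JSON object in the chunk fits in one block.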
| 304
|
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
lowerCAmelCase = False
class VersatileDiffusionPipelineFastTests( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class VersatileDiffusionPipelineIntegrationTests( unittest.TestCase ):
    def tearDown(self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_from_save_pretrained(self ):
        pipe = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
        generator = torch.manual_seed(0 )
        image = pipe.dual_guided(
            prompt='first prompt' , image=init_image , text_to_image_strength=0.75 , generator=generator , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname )
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname , torch_dtype=torch.float16 )
            pipe.to(torch_device )
            pipe.set_progress_bar_config(disable=None )
            generator = generator.manual_seed(0 )
            new_image = pipe.dual_guided(
                prompt='first prompt' , image=init_image , text_to_image_strength=0.75 , generator=generator , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
        assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
    def test_inference_dual_guided_then_text_to_image(self ):
        pipe = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        prompt = 'cyberpunk 2077'
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
        generator = torch.manual_seed(0 )
        image = pipe.dual_guided(
            prompt=prompt , image=init_image , text_to_image_strength=0.75 , generator=generator , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='numpy' , ).images
        image_slice = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        expected_slice = np.array([0.14_48, 0.16_19, 0.17_41, 0.10_86, 0.11_47, 0.11_28, 0.11_99, 0.11_65, 0.10_01] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
        prompt = 'A painting of a squirrel eating a burger '
        generator = torch.manual_seed(0 )
        image = pipe.text_to_image(
            prompt=prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='numpy' ).images
        image_slice = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        expected_slice = np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
        image = pipe.image_variation(init_image , generator=generator , output_type='numpy' ).images
        image_slice = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        expected_slice = np.array([0.30_76, 0.31_23, 0.32_84, 0.37_82, 0.37_70, 0.38_94, 0.42_97, 0.43_31, 0.44_56] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 304
| 1
|
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]
    def __post_init__(self ):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
    def resolution(self ):
        return torch.from_numpy(np.array([self.width, self.height] , dtype=np.float32 ) )
    def fov(self ):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.float32 ) )
    def get_image_coords(self ):
        # Enumerate all pixels and convert flat indices to (x, y) = (column, row) pairs.
        pixel_indices = torch.arange(self.height * self.width )
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices , self.width , rounding_mode='trunc' ),
            ] , axis=1 , )
        return coords
    @property
    def camera_rays(self ):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape ) )
        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
        rays = self.get_camera_rays(coords )
        rays = rays.view(batch_size , inner_batch_size * self.height * self.width , 2 , 3 )
        return rays
    def get_camera_rays(self , coords ):
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]
        flat = coords.view(batch_size , -1 , 2 )
        res = self.resolution()
        fov = self.fov()
        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2 )
        fracs = fracs.view(batch_size , -1 , 2 )
        directions = (
            self.z.view(batch_size , 1 , 3 )
            + self.x.view(batch_size , 1 , 3 ) * fracs[:, :, :1]
            + self.y.view(batch_size , 1 , 3 ) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1 , keepdim=True )
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
                directions,
            ] , dim=2 , )
        return rays.view(batch_size , *shape , 2 , 3 )
    def resize_image(self , width , height ):
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin , x=self.x , y=self.y , z=self.z , width=width , height=height , x_fov=self.x_fov , y_fov=self.y_fov , )
def create_pan_cameras(size ) -> DifferentiableProjectiveCamera:
    '''simple docstring'''
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0 , 2 * np.pi , num=2_0 ):
        z = np.array([np.sin(theta ), np.cos(theta ), -0.5] )
        z /= np.sqrt(np.sum(z**2 ) )
        origin = -z * 4
        x = np.array([np.cos(theta ), -np.sin(theta ), 0.0] )
        y = np.cross(z , x )
        origins.append(origin )
        xs.append(x )
        ys.append(y )
        zs.append(z )
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins , axis=0 ) ).float() , x=torch.from_numpy(np.stack(xs , axis=0 ) ).float() , y=torch.from_numpy(np.stack(ys , axis=0 ) ).float() , z=torch.from_numpy(np.stack(zs , axis=0 ) ).float() , width=size , height=size , x_fov=0.7 , y_fov=0.7 , shape=(1, len(xs )) , )
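# The loop above builds a 20-camera turntable rig: each camera sits on a circle of radius 4,
# with its z axis aimed at the origin (tilted slightly downward) and a 0.7 rad field of view.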
| 304
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_xmod''': [
'''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XmodConfig''',
'''XmodOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_xmod'''] = [
'''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XmodForCausalLM''',
'''XmodForMaskedLM''',
'''XmodForMultipleChoice''',
'''XmodForQuestionAnswering''',
'''XmodForSequenceClassification''',
'''XmodForTokenClassification''',
'''XmodModel''',
'''XmodPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 304
| 1
|
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
logger = logging.get_logger(__name__)
class ImageGPTFeatureExtractor( ImageGPTImageProcessor ):
    def __init__(self , *args , **kwargs ):
        warnings.warn(
            'The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use ImageGPTImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 304
|
import math
from datetime import datetime, timedelta
def gauss_easter(year ) -> datetime:
    '''simple docstring'''
    metonic_cycle = year % 1_9
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 1_0_0 )
    lunar_orbit_correction = math.floor((1_3 + 8 * leap_day_inhibits) / 2_5 )
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        1_5 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 3_0
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (1_9 * metonic_cycle + secular_moon_shift) % 3_0
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7
    if days_to_add == 2_9 and days_from_phm_to_sunday == 6:
        return datetime(year , 4 , 1_9 )
    elif days_to_add == 2_8 and days_from_phm_to_sunday == 6:
        return datetime(year , 4 , 1_8 )
    else:
        return datetime(year , 3 , 2_2 ) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
for year in (1_9_9_4, 2_0_0_0, 2_0_1_0, 2_0_2_1, 2_0_2_3):
    tense = '''will be''' if year > datetime.now().year else '''was'''
print(F'Easter in {year} {tense} {gauss_easter(year)}')
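# Worked check: gauss_easter(2000) returns 2000-04-23, the actual date of Easter Sunday that year.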
| 304
| 1
|
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ['''TF_CPP_MIN_LOG_LEVEL'''] = '''3'''
print('''Python version:''', sys.version)
print('''transformers version:''', transformers.__version__)
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
print('''NCCL version:''', torch.cuda.nccl.version())
except ImportError:
print('''Torch version:''', None)
try:
import deepspeed
print('''DeepSpeed version:''', deepspeed.__version__)
except ImportError:
print('''DeepSpeed version:''', None)
try:
import tensorflow as tf
print('''TensorFlow version:''', tf.__version__)
print('''TF GPUs available:''', bool(tf.config.list_physical_devices('''GPU''')))
print('''Number of TF GPUs available:''', len(tf.config.list_physical_devices('''GPU''')))
except ImportError:
print('''TensorFlow version:''', None)
| 304
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json''',
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig( PretrainedConfig ):
    model_type ='''blenderbot-small'''
    keys_to_ignore_at_inference =['''past_key_values''']
    attribute_map ={'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
    def __init__(self , vocab_size=5_0_2_6_5 , max_position_embeddings=5_1_2 , encoder_layers=8 , encoder_ffn_dim=2_0_4_8 , encoder_attention_heads=1_6 , decoder_layers=8 , decoder_ffn_dim=2_0_4_8 , decoder_attention_heads=1_6 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , use_cache=True , is_encoder_decoder=True , activation_function="gelu" , d_model=5_1_2 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=1 , scale_embedding=False , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , forced_eos_token_id=2 , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
class BlenderbotSmallOnnxConfig( OnnxSeq2SeqConfigWithPast ):
@property
    def inputs(self ):
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                ] )
            if self.use_past:
                common_inputs['decoder_input_ids'] = {0: 'batch'}
                common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
            else:
                common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
                common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs , direction='inputs' )
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                ] )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers ):
                    common_inputs[f'past_key_values.{i}.key'] = {0: 'batch', 2: 'past_sequence + sequence'}
                    common_inputs[f'past_key_values.{i}.value'] = {0: 'batch', 2: 'past_sequence + sequence'}
        else:
            common_inputs = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                    ('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
                    ('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
                ] )
        return common_inputs
@property
    def outputs(self ):
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast , self ).outputs
        if self.use_past:
            num_encoder_layers, _ = self.num_layers
            for i in range(num_encoder_layers ):
                common_outputs[f'present.{i}.key'] = {0: 'batch', 2: 'past_sequence + sequence'}
                common_outputs[f'present.{i}.value'] = {0: 'batch', 2: 'past_sequence + sequence'}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ):
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer , batch_size , seq_length , is_pair , framework )
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer , batch_size , decoder_seq_length , is_pair , framework )
        decoder_inputs = {f'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs , **decoder_inputs )
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
            else:
                import torch
            batch, encoder_seq_length = common_inputs['input_ids'].shape
            decoder_seq_length = common_inputs['decoder_input_ids'].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs['decoder_attention_mask'] = torch.cat(
                [common_inputs['decoder_attention_mask'], torch.ones(batch , decoder_past_length )] , dim=1 )
            common_inputs['past_key_values'] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers , num_decoder_layers )
            max_num_layers = max(num_encoder_layers , num_decoder_layers ) - min_num_layers
            remaining_side_name = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
            for _ in range(min_num_layers ):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape ),
                        torch.zeros(decoder_shape ),
                        torch.zeros(encoder_shape ),
                        torch.zeros(encoder_shape ),
                    ) )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
            for _ in range(min_num_layers , max_num_layers ):
                common_inputs["past_key_values"].append((torch.zeros(shape ), torch.zeros(shape )) )
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ):
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer , batch_size , seq_length , is_pair , framework )
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
            else:
                import torch
            batch, seqlen = common_inputs['input_ids'].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs['attention_mask'].dtype
            common_inputs['attention_mask'] = torch.cat(
                [common_inputs['attention_mask'], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
            common_inputs['past_key_values'] = [
                (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(num_encoder_layers )
            ]
        return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ):
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair )
        seq_length = compute_effective_axis_dimension(
            seq_length , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=token_to_add )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input , return_tensors=framework ) )
        return common_inputs
    def generate_dummy_inputs(self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ):
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        return common_inputs
    def _flatten_past_key_values_(self , flattened_output , name , idx , t ):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output , name , idx , t )
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast , self )._flatten_past_key_values_(
                flattened_output , name , idx , t )
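# Dummy past_key_values recap: each cached (key, value) tensor has shape
# (batch, num_attention_heads, past_sequence_length, hidden_size // num_attention_heads);
# in the seq2seq case every layer stores two such pairs (decoder self-attention and cross-attention).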
| 304
| 1
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 304
|
from math import factorial, radians
def sin(angle_in_degrees , accuracy = 1_8 , rounded_values_count = 1_0 ) -> float:
    '''simple docstring'''
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees )
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy ):
        result += (b * (angle_in_radians**a)) / factorial(a )
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result , rounded_values_count )
if __name__ == "__main__":
__import__('''doctest''').testmod()
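# Worked check: sin(30) ≈ 0.5 and sin(90) ≈ 1.0; with the default 18 Maclaurin terms the
# series matches math.sin to all 10 of the rounded decimal places returned above.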
| 304
| 1
|
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ['''tests.fixtures.files''', '''tests.fixtures.hub''', '''tests.fixtures.fsspec''']
def _lowerCamelCase( lowercase__ , lowercase__ ) -> Tuple:
'''simple docstring'''
for item in items:
if any(marker in item.keywords for marker in ['integration', 'unit'] ):
continue
item.add_marker(pytest.mark.unit )
def _lowerCamelCase( lowercase__ ) -> Union[str, Any]:
'''simple docstring'''
config.addinivalue_line('markers' , 'torchaudio_latest: mark test to run with torchaudio>=0.12' )
@pytest.fixture(autouse=lowercase__ )
def _lowerCamelCase( lowercase__ , lowercase__ ) -> int:
'''simple docstring'''
__lowercase= tmp_path_factory.getbasetemp() / 'cache'
__lowercase= test_hf_cache_home / 'datasets'
__lowercase= test_hf_cache_home / 'metrics'
__lowercase= test_hf_cache_home / 'modules'
monkeypatch.setattr('datasets.config.HF_DATASETS_CACHE' , str(lowercase__ ) )
monkeypatch.setattr('datasets.config.HF_METRICS_CACHE' , str(lowercase__ ) )
monkeypatch.setattr('datasets.config.HF_MODULES_CACHE' , str(lowercase__ ) )
__lowercase= test_hf_datasets_cache / 'downloads'
monkeypatch.setattr('datasets.config.DOWNLOADED_DATASETS_PATH' , str(lowercase__ ) )
__lowercase= test_hf_datasets_cache / 'downloads' / 'extracted'
monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(lowercase__ ) )
@pytest.fixture(autouse=lowercase__ , scope='session' )
def _lowerCamelCase( ) -> Optional[int]:
'''simple docstring'''
datasets.disable_progress_bar()
@pytest.fixture(autouse=lowercase__ )
def _lowerCamelCase( lowercase__ ) -> int:
'''simple docstring'''
monkeypatch.setattr('datasets.config.HF_UPDATE_DOWNLOAD_COUNTS' , lowercase__ )
@pytest.fixture
def _lowerCamelCase( lowercase__ ) -> Optional[Any]:
'''simple docstring'''
monkeypatch.setattr('sqlalchemy.util.deprecations.SILENCE_UBER_WARNING' , lowercase__ )
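# Usage note (assumption: standard pytest marker selection). With the collection
# hook above, every test not explicitly marked `integration` or `unit` is given
# the `unit` marker, so:
#
#     pytest -m unit         # run only the (default) unit tests
#     pytest -m integration  # run only tests explicitly marked integration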
| 304
|
# Hand-tuned timestep schedules (27/40/50/100/185-step "fast", "smart" and
# "super" variants, as used by the DeepFloyd IF cascaded diffusion pipelines).
fast27_timesteps = [
    999, 800, 799, 600, 599, 500, 400, 399, 377, 355, 333, 311, 288, 266,
    244, 222, 200, 199, 177, 155, 133, 111, 88, 66, 44, 22, 0,
]
smart27_timesteps = [
    999, 976, 952, 928, 905, 882, 858, 857, 810, 762, 715, 714, 572, 429,
    428, 286, 285, 238, 190, 143, 142, 118, 95, 71, 47, 24, 0,
]
smart50_timesteps = [
    999, 988, 977, 966, 955, 944, 933, 922, 911, 900, 899, 879, 859, 840,
    820, 800, 799, 766, 733, 700, 699, 650, 600, 599, 500, 499, 400, 399,
    350, 300, 299, 266, 233, 200, 199, 179, 159, 140, 120, 100, 99, 88,
    77, 66, 55, 44, 33, 22, 11, 0,
]
smart100_timesteps = [
    999, 995, 992, 989, 985, 981, 978, 975, 971, 967, 964, 961, 957, 956,
    951, 947, 942, 937, 933, 928, 923, 919, 914, 913, 908, 903, 897, 892,
    887, 881, 876, 871, 870, 864, 858, 852, 846, 840, 834, 828, 827, 820,
    813, 806, 799, 792, 785, 784, 777, 770, 763, 756, 749, 742, 741, 733,
    724, 716, 707, 699, 698, 688, 677, 666, 656, 655, 645, 634, 623, 613,
    612, 598, 584, 570, 569, 555, 541, 527, 526, 505, 484, 483, 462, 440,
    439, 396, 395, 352, 351, 308, 307, 264, 263, 220, 219, 176, 132, 88,
    44, 0,
]
smart185_timesteps = [
    999, 997, 995, 992, 990, 988, 986, 984, 981, 979, 977, 975, 972, 970,
    968, 966, 964, 961, 959, 957, 956, 954, 951, 949, 946, 944, 941, 939,
    936, 934, 931, 929, 926, 924, 921, 919, 916, 914, 913, 910, 907, 905,
    902, 899, 896, 893, 891, 888, 885, 882, 879, 877, 874, 871, 870, 867,
    864, 861, 858, 855, 852, 849, 846, 843, 840, 837, 834, 831, 828, 827,
    824, 821, 817, 814, 811, 808, 804, 801, 798, 795, 791, 788, 785, 784,
    780, 777, 774, 770, 766, 763, 760, 756, 752, 749, 746, 742, 741, 737,
    733, 730, 726, 722, 718, 714, 710, 707, 703, 699, 698, 694, 690, 685,
    681, 677, 673, 669, 664, 660, 656, 655, 650, 646, 641, 636, 632, 627,
    622, 618, 613, 612, 607, 602, 596, 591, 586, 580, 575, 570, 569, 563,
    557, 551, 545, 539, 533, 527, 526, 519, 512, 505, 498, 491, 484, 483,
    474, 466, 457, 449, 440, 439, 428, 418, 407, 396, 395, 381, 366, 352,
    351, 330, 308, 307, 286, 264, 263, 242, 220, 219, 176, 175, 132, 131,
    88, 44, 0,
]
super27_timesteps = [
    999, 991, 982, 974, 966, 958, 950, 941, 933, 925, 916, 908, 900, 899,
    874, 850, 825, 800, 799, 700, 600, 500, 400, 300, 200, 100, 0,
]
super40_timesteps = [
    999, 992, 985, 978, 971, 964, 957, 949, 942, 935, 928, 921, 914, 907,
    900, 899, 879, 859, 840, 820, 800, 799, 766, 733, 700, 699, 650, 600,
    599, 500, 499, 400, 399, 300, 299, 200, 199, 100, 99, 0,
]
super100_timesteps = [
    999, 996, 992, 989, 985, 982, 979, 975, 972, 968, 965, 961, 958, 955,
    951, 948, 944, 941, 938, 934, 931, 927, 924, 920, 917, 914, 910, 907,
    903, 900, 899, 891, 884, 876, 869, 861, 853, 846, 838, 830, 823, 815,
    808, 800, 799, 788, 777, 766, 755, 744, 733, 722, 711, 700, 699, 688,
    677, 666, 655, 644, 633, 622, 611, 600, 599, 585, 571, 557, 542, 528,
    514, 500, 499, 485, 471, 457, 442, 428, 414, 400, 399, 379, 359, 340,
    320, 300, 299, 279, 259, 240, 220, 200, 199, 166, 133, 100, 99, 66,
    33, 0,
]
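# Usage sketch (assumption: a pipeline whose __call__ accepts a custom
# `timesteps` schedule, as the DeepFloyd IF pipelines do; the checkpoint id
# below is illustrative):
#
#     pipe = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0")
#     image = pipe(prompt_embeds=embeds, timesteps=fast27_timesteps).images[0]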
| 304
| 1
|
from __future__ import annotations

import math


def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Return the optimal value for the current player in a game tree whose leaves are `scores`."""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, not is_max, scores, height),
            minimax(depth + 1, node_index * 2 + 1, not is_max, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, not is_max, scores, height),
            minimax(depth + 1, node_index * 2 + 1, not is_max, scores, height),
        )
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
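# Hand-checked trace for the tree above: the leaf pairs reduce (max level) to
# [90, 33, 65, 34423], the next level (min) gives [33, 65], and the root (max)
# picks max(33, 65) = 65, so main() prints "Optimal value : 65".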
| 304
|
from __future__ import annotations

import numpy as np


def relu(vector: list[float]) -> np.ndarray:
    """Apply the rectified linear unit element-wise: max(0, x)."""
    return np.maximum(0, vector)


if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0, 0, 5]
| 304
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swinv2-tiny-patch4-window8-256": (
        "https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
    ),
}
class Swinv2Config(PretrainedConfig):
    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5, encoder_stride=32, **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
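# Usage sketch (assumption: the matching Swin V2 model class from transformers):
#
#     from transformers import Swinv2Model
#     config = Swinv2Config(image_size=256)
#     model = Swinv2Model(config)  # randomly initialised model built from this config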
| 304
|
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2**power."""
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i)
    return sum_of_num


if __name__ == "__main__":
    power = int(input("Enter the power of 2: ").strip())
    print("2 ^ ", power, " = ", 2**power)
    result = solution(power)
    print("Sum of the digits is: ", result)
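# Worked example (hand-checkable): 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26.
#
#     >>> solution(15)
#     26
#
# For the default power of 1000 the digit sum is 1366 (Project Euler problem 16).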
| 304
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_resnet''': ['''RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ResNetConfig''', '''ResNetOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_resnet"] = [
'''RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ResNetForImageClassification''',
'''ResNetModel''',
'''ResNetPreTrainedModel''',
'''ResNetBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_resnet"] = [
'''TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFResNetForImageClassification''',
'''TFResNetModel''',
'''TFResNetPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_resnet"] = [
'''FlaxResNetForImageClassification''',
'''FlaxResNetModel''',
'''FlaxResNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 304
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional

import datasets
import numpy as np
import tensorflow as tf

from transformers import (
    AutoConfig,
    AutoTokenizer,
    EvalPrediction,
    HfArgumentParser,
    PreTrainedTokenizer,
    TFAutoModelForSequenceClassification,
    TFTrainer,
    TFTrainingArguments,
)
from transformers.utils import logging as hf_logging


hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()


def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}
    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )
    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )
    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )
    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id


logger = logging.getLogger(__name__)


@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")
            results.update(result)

    return results


if __name__ == "__main__":
    main()
| 304
| 1
|
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter

import requests


def get_job_links(workflow_run_id, token=None):
    """Extract job names and their job links in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}

    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


def get_artifacts_links(workflow_run_id, token=None):
    """Get all artifact links from a workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}

    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})

        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download a GitHub Actions artifact zip. The API URL redirects, so fetch the redirect target first."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)


def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a downloaded artifact (in .zip format)."""
    errors = []
    failed_tests = []
    job_name = None

    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]

    return result


def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all artifact files in `artifact_dir`."""
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]

    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))

    return errors


def reduce_by_error(logs, error_filter=None):
    """Count each error."""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def get_model(test):
    """Get the model name from a test method."""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        test = test.split("/")[2]
    else:
        test = None

    return test
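# Example of the parsing above (test ids are illustrative):
#
#     >>> get_model("tests/models/bert/test_modeling_bert.py::BertModelTest::test_forward")
#     'bert'
#     >>> get_model("tests/utils/test_logging.py::test_set_level") is None  # not under tests/models/
#     True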
def reduce_by_model(logs, error_filter=None):
    """Count each error per model."""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}

    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def make_github_table(reduced_by_error):
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} |  |"
        lines.append(line)

    return "\n".join(lines)


def make_github_table_per_model(reduced_by_model):
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)

    return "\n".join(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)

    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
    with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
        json.dump(job_links, fp, ensure_ascii=False, indent=4)

    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)

    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)

    errors = get_all_errors(args.output_dir, job_links=job_links)

    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])

    # print the top 30 most common test errors
    most_common = counter.most_common(30)
    for item in most_common:
        print(item)

    with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)

    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)

    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)

    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s2)
| 304
|
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class CvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))
class CvtModelTester:
    def __init__(self, parent, batch_size=13, image_size=64, num_channels=3, embed_dim=[16, 48, 96], num_heads=[1, 3, 6], depth=[1, 2, 10], patch_sizes=[7, 3, 3], patch_stride=[4, 2, 2], patch_padding=[2, 1, 1], stride_kv=[2, 2, 2], cls_token=[False, False, True], attention_drop_rate=[0.0, 0.0, 0.0], initializer_range=0.02, layer_norm_eps=1e-12, is_training=True, use_labels=True, num_labels=2):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return CvtConfig(
            image_size=self.image_size,
            num_labels=self.num_labels,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            num_heads=self.num_heads,
            patch_sizes=self.patch_sizes,
            patch_padding=self.patch_padding,
            patch_stride=self.patch_stride,
            stride_kv=self.stride_kv,
            depth=self.depth,
            cls_token=self.cls_token,
            attention_drop_rate=self.attention_drop_rate,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = CvtModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = CvtForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class CvtModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": CvtModel, "image-classification": CvtForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = CvtModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Cvt does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Cvt does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Cvt does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class CvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9285, 0.9015, -0.3150]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 304
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/trocr-base-handwritten": (
        "https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(self, vocab_size=50265, d_model=1024, decoder_layers=12, decoder_attention_heads=16, decoder_ffn_dim=4096, activation_function="gelu", max_position_embeddings=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, decoder_start_token_id=2, init_std=0.02, decoder_layerdrop=0.0, use_cache=True, scale_embedding=False, use_learned_position_embeddings=True, layernorm_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 304
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 304
| 1
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/unispeech-large-1500h-cv": (
        "https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
    ),
    # See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig(PretrainedConfig):
    model_type = "unispeech"

    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, feat_quantizer_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, do_stable_layer_norm=False, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, num_codevectors_per_group=320, num_codevector_groups=2, contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1, ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, num_ctc_classes=80, pad_token_id=0, bos_token_id=1, eos_token_id=2, replace_prob=0.5, **kwargs):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
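# Worked example for inputs_to_logits_ratio: with the default conv_stride of
# (5, 2, 2, 2, 2, 2, 2) the product is 5 * 2**6 = 320, i.e. the feature encoder
# emits one frame per 320 input samples.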
| 304
|
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = "<<<<<<< This should probably be modified because it mentions: "

HIGHLIGHT_MESSAGE_POST = """=======
>>>>>>>
"""
TO_HIGHLIGHT = [
'''TextEncoderConfig''',
'''ByteTextEncoder''',
'''SubwordTextEncoder''',
'''encoder_config''',
'''maybe_build_from_corpus''',
'''manual_dir''',
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(R'''tfds\.core''', R'''datasets'''),
(R'''tf\.io\.gfile\.GFile''', R'''open'''),
(R'''tf\.([\w\d]+)''', R'''datasets.Value(\'\1\')'''),
(R'''tfds\.features\.Text\(\)''', R'''datasets.Value(\'string\')'''),
(R'''tfds\.features\.Text\(''', R'''datasets.Value(\'string\'),'''),
(R'''features\s*=\s*tfds.features.FeaturesDict\(''', R'''features=datasets.Features('''),
(R'''tfds\.features\.FeaturesDict\(''', R'''dict('''),
(R'''The TensorFlow Datasets Authors''', R'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''),
(R'''tfds\.''', R'''datasets.'''),
(R'''dl_manager\.manual_dir''', R'''self.config.data_dir'''),
(R'''self\.builder_config''', R'''self.config'''),
]
def convert_command_factory(args: Namespace):
    """Factory function used to build the `datasets-cli convert` command from parsed arguments."""
    return ConvertCommand(args.tfds_path, args.datasets_directory)
class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
        )
        train_parser.add_argument(
            "--tfds_path",
            type=str,
            required=True,
            help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
        )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder."
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory
    def run(self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")

        abs_datasets_path = os.path.abspath(self._datasets_directory)

        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")

        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}

        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]

        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)

            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue

            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()

            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line

                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)

                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    out_line = "from . import " + match.group(1)

                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}")

                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)

            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dataset_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dataset_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f"Adding directory {output_dir}")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)

            if needs_manual_update:
                with_manual_update.append(output_file)

            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f"Converted in {output_file}")

        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {dest_folder} to {utils_file}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")

        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'."
                )
| 304
| 1
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , ) -> int:
'''simple docstring'''
__lowercase= {}
if train_file is not None:
__lowercase= [train_file]
if eval_file is not None:
__lowercase= [eval_file]
if test_file is not None:
__lowercase= [test_file]
__lowercase= datasets.load_dataset('csv' , data_files=lowercase__ )
__lowercase= list(ds[list(files.keys() )[0]].features.keys() )
__lowercase= features_name.pop(lowercase__ )
__lowercase= list(set(ds[list(files.keys() )[0]][label_name] ) )
__lowercase= {label: i for i, label in enumerate(lowercase__ )}
__lowercase= tokenizer.model_input_names
__lowercase= {}
if len(lowercase__ ) == 1:
for k in files.keys():
__lowercase= ds[k].map(
lambda lowercase__ : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=lowercase__ , max_length=lowercase__ , padding='max_length' ) , batched=lowercase__ , )
elif len(lowercase__ ) == 2:
for k in files.keys():
__lowercase= ds[k].map(
lambda lowercase__ : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=lowercase__ , max_length=lowercase__ , padding='max_length' , ) , batched=lowercase__ , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
__lowercase= {k: v for k, v in ex.items() if k in input_names}
__lowercase= labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
__lowercase= {k: v for k, v in ex.items() if k in input_names}
__lowercase= labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
__lowercase= {k: v for k, v in ex.items() if k in input_names}
__lowercase= labelaid[ex[label_name]]
yield (d, label)
__lowercase= (
tf.data.Dataset.from_generator(
lowercase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
__lowercase= train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
__lowercase= (
tf.data.Dataset.from_generator(
lowercase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
__lowercase= val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
__lowercase= (
tf.data.Dataset.from_generator(
lowercase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
__lowercase= test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
lowerCAmelCase = logging.getLogger(__name__)
@dataclass
class A :
UpperCamelCase_ : int =field(metadata={'''help''': '''Which column contains the label'''} )
UpperCamelCase_ : str =field(default=A_ , metadata={'''help''': '''The path of the training file'''} )
UpperCamelCase_ : Optional[str] =field(default=A_ , metadata={'''help''': '''The path of the development file'''} )
UpperCamelCase_ : Optional[str] =field(default=A_ , metadata={'''help''': '''The path of the test file'''} )
UpperCamelCase_ : int =field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
UpperCamelCase_ : bool =field(
default=A_ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
@dataclass
class A :
UpperCamelCase_ : str =field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
UpperCamelCase_ : Optional[str] =field(
default=A_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
UpperCamelCase_ : Optional[str] =field(
default=A_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
UpperCamelCase_ : bool =field(default=A_ , metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
UpperCamelCase_ : Optional[str] =field(
default=A_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
def main() -> None:
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )
    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")
        results.update(result)

    return results
if __name__ == "__main__":
main()
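# Hedged usage note (added; the script file name and all paths/model names below are
# placeholders, not taken from this file):
#   python run_tf_text_classification.py \
#       --train_file train.csv --dev_file dev.csv --label_column_id 0 \
#       --model_name_or_path bert-base-uncased --output_dir ./out --do_train --do_eval
# Every flag maps onto a dataclass field parsed by HfArgumentParser in main().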
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
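# Hedged usage sketch (added): `from_model_config` is the generic constructor on
# transformers.onnx.OnnxConfig and is assumed to be inherited unchanged here.
# config = AlbertConfig()
# onnx_config = AlbertOnnxConfig.from_model_config(config)
# print(onnx_config.inputs)  # OrderedDict of input names -> the dynamic axes defined above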
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()

DEVICE_MAPPING: Optional[dict] = None
class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs

    @staticmethod
    def _map_devices_to_str() -> dict:
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column

    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})

    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
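# Hedged usage sketch (added): this formatter is what backs `Dataset.with_format("jax")`
# in the `datasets` library; extra keyword arguments are forwarded to JaxFormatter.
# from datasets import Dataset
# ds = Dataset.from_dict({"x": [[1, 2], [3, 4]]}).with_format("jax")
# ds[0]["x"]  # -> jax.Array, with the dtype chosen by the rules in _tensorize above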
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
def rename_key(key):
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    if "norm2" in key:
        key = re.sub(r"norm2", "layer_norm2", key)
    if "encoder.norm" in key:
        key = re.sub(r"encoder.norm", "post_layernorm", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
    if "self_attn" in key:
        key = re.sub(r"self_attn.proj", "self_attn.projection", key)
    return key
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})

    hf_model = BlipForConditionalGeneration(config).eval()

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"
    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="base")
    pt_model = pt_model.eval()

    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_model.load_state_dict(modified_state_dict)

    image_size = 384
    image = load_demo_image(image_size=image_size, device="cpu")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    input_ids = tokenizer(["a picture of"]).input_ids

    out = hf_model.generate(image, input_ids)
    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    out = hf_model.generate(image)
    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)

    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
    )

    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base")
    vqa_model.eval()

    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_vqa_model = BlipForQuestionAnswering(config)
    hf_vqa_model.load_state_dict(modified_state_dict)

    question = ["How many dogs are in this image?"]
    question_input_ids = tokenizer(question, return_tensors="pt").input_ids

    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))

    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa")

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"

    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base")
    itm_model.eval()

    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_itm_model = BlipForImageTextRetrieval(config)

    question = ["A picture of a woman with a dog sitting in a beach"]
    question_input_ids = tokenizer(
        question,
        return_tensors="pt",
        padding="max_length",
        truncation=True,
        max_length=35,
    ).input_ids

    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()

    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)

    assert out[0].item() == 0.2110687494277954
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127

    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
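# Hedged usage note (added; the script name is assumed from the transformers repo layout):
#   python convert_blip_original_pytorch_to_hf.py --pytorch_dump_folder_path ./blip-hf
# The original BLIP checkpoints are fetched from the hard-coded URLs above, so no
# separate checkpoint-path argument is needed.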
class SubArray:
    def __init__(self, arr):
        # we need a list not a string, so do something to change the type
        self.array = arr.split(",")

    def solve_sub_array(self):
        rear = [int(self.array[0])] * len(self.array)
        sum_value = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(
                int(self.array[i]) + sum_value[i - 1], int(self.array[i])
            )
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]


if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(("the results is:", re))
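# Worked example (added for illustration): sum_value[i] is the best subarray sum
# ending at index i (Kadane's recurrence) and rear[i] the best seen so far, so
# SubArray("1,-2,3,4,-5").solve_sub_array() returns 7, from the subarray [3, 4].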
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
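# Worked example (added for illustration): each input is sorted independently on
# construction, and merge_lists simply re-sorts the concatenation, so
# str(merge_lists(SSL((1, 3)), SSL((0, 2))))  # -> '0 -> 1 -> 2 -> 3'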
import pickle
import numpy as np
from matplotlib import pyplot as plt
class CNN:
    def __init__(self, conv1_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w=0.2, rate_t=0.2):
        # conv1_get: [kernel_size, kernel_number, conv_step]; size_p1: pooling size;
        # bp_num1/2/3: widths of the flatten, hidden and output BP layers
        self.num_bp1 = bp_num1
        self.num_bp2 = bp_num2
        self.num_bp3 = bp_num3
        self.conv1 = conv1_get[:2]
        self.step_conv1 = conv1_get[2]
        self.size_pooling1 = size_p1
        self.rate_weight = rate_w
        self.rate_thre = rate_t
        self.w_conv1 = [
            np.mat(-1 * np.random.rand(self.conv1[0], self.conv1[0]) + 0.5)
            for i in range(self.conv1[1])
        ]
        self.wkj = np.mat(-1 * np.random.rand(self.num_bp3, self.num_bp2) + 0.5)
        self.vji = np.mat(-1 * np.random.rand(self.num_bp2, self.num_bp1) + 0.5)
        self.thre_conv1 = -2 * np.random.rand(self.conv1[1]) + 1
        self.thre_bp2 = -2 * np.random.rand(self.num_bp2) + 1
        self.thre_bp3 = -2 * np.random.rand(self.num_bp3) + 1

    def save_model(self, save_path):
        # save model dict with pickle
        model_dic = {
            "num_bp1": self.num_bp1,
            "num_bp2": self.num_bp2,
            "num_bp3": self.num_bp3,
            "conv1": self.conv1,
            "step_conv1": self.step_conv1,
            "size_pooling1": self.size_pooling1,
            "rate_weight": self.rate_weight,
            "rate_thre": self.rate_thre,
            "w_conv1": self.w_conv1,
            "wkj": self.wkj,
            "vji": self.vji,
            "thre_conv1": self.thre_conv1,
            "thre_bp2": self.thre_bp2,
            "thre_bp3": self.thre_bp3,
        }
        with open(save_path, "wb") as f:
            pickle.dump(model_dic, f)

        print(f"Model saved: {save_path}")

    @classmethod
    def read_model(cls, model_path):
        # read saved model
        with open(model_path, "rb") as f:
            model_dic = pickle.load(f)  # noqa: S301

        conv_get = model_dic.get("conv1")
        conv_get.append(model_dic.get("step_conv1"))
        size_p1 = model_dic.get("size_pooling1")
        bp1 = model_dic.get("num_bp1")
        bp2 = model_dic.get("num_bp2")
        bp3 = model_dic.get("num_bp3")
        r_w = model_dic.get("rate_weight")
        r_t = model_dic.get("rate_thre")
        # create model instance
        conv_ins = CNN(conv_get, size_p1, bp1, bp2, bp3, r_w, r_t)
        # modify model parameter
        conv_ins.w_conv1 = model_dic.get("w_conv1")
        conv_ins.wkj = model_dic.get("wkj")
        conv_ins.vji = model_dic.get("vji")
        conv_ins.thre_conv1 = model_dic.get("thre_conv1")
        conv_ins.thre_bp2 = model_dic.get("thre_bp2")
        conv_ins.thre_bp3 = model_dic.get("thre_bp3")
        return conv_ins

    def sig(self, x):
        return 1 / (1 + np.exp(-1 * x))

    def do_round(self, x):
        return round(x, 3)

    def convolute(self, data, convs, w_convs, thre_convs, conv_step):
        # convolution process
        size_conv = convs[0]
        num_conv = convs[1]
        size_data = np.shape(data)[0]
        # get the data slice of original image data, data_focus
        data_focus = []
        for i_focus in range(0, size_data - size_conv + 1, conv_step):
            for j_focus in range(0, size_data - size_conv + 1, conv_step):
                focus = data[
                    i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
                ]
                data_focus.append(focus)
        # calculate the feature map of every single kernel, and saved as list of matrix
        data_featuremap = []
        size_feature_map = int((size_data - size_conv) / conv_step + 1)
        for i_map in range(num_conv):
            featuremap = []
            for i_focus in range(len(data_focus)):
                net_focus = (
                    np.sum(np.multiply(data_focus[i_focus], w_convs[i_map]))
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(net_focus))
            featuremap = np.asmatrix(featuremap).reshape(
                size_feature_map, size_feature_map
            )
            data_featuremap.append(featuremap)
        # expanding the data slice to one dimension
        focus1_list = []
        for each_focus in data_focus:
            focus1_list.extend(self._expand_mat(each_focus))
        focus_list = np.asarray(focus1_list)
        return focus_list, data_featuremap

    def pooling(self, featuremaps, size_pooling, pooling_type="average_pool"):
        # pooling process
        size_map = len(featuremaps[0])
        size_pooled = int(size_map / size_pooling)
        featuremap_pooled = []
        for i_map in range(len(featuremaps)):
            feature_map = featuremaps[i_map]
            map_pooled = []
            for i_focus in range(0, size_map, size_pooling):
                for j_focus in range(0, size_map, size_pooling):
                    focus = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(focus))
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(focus))
            map_pooled = np.asmatrix(map_pooled).reshape(size_pooled, size_pooled)
            featuremap_pooled.append(map_pooled)
        return featuremap_pooled

    def _expand(self, data):
        # expanding three dimension data to one dimension list
        data_expanded = []
        for i in range(len(data)):
            shapes = np.shape(data[i])
            data_listed = data[i].reshape(1, shapes[0] * shapes[1])
            data_listed = data_listed.getA().tolist()[0]
            data_expanded.extend(data_listed)
        data_expanded = np.asarray(data_expanded)
        return data_expanded

    def _expand_mat(self, data_mat):
        # expanding matrix to one dimension list
        data_mat = np.asarray(data_mat)
        shapes = np.shape(data_mat)
        data_expanded = data_mat.reshape(1, shapes[0] * shapes[1])
        return data_expanded

    def _calculate_gradient_from_pool(self, out_map, pd_pool, num_map, size_map, size_pooling):
        pd_all = []
        i_pool = 0
        for i_map in range(num_map):
            pd_conv1 = np.ones((size_map, size_map))
            for i in range(0, size_map, size_pooling):
                for j in range(0, size_map, size_pooling):
                    pd_conv1[i : i + size_pooling, j : j + size_pooling] = pd_pool[
                        i_pool
                    ]
                    i_pool = i_pool + 1
            pd_conv2 = np.multiply(
                pd_conv1, np.multiply(out_map[i_map], (1 - out_map[i_map]))
            )
            pd_all.append(pd_conv2)
        return pd_all

    def train(self, patterns, datas_train, datas_teach, n_repeat, error_accuracy, draw_e=bool):
        # model training
        print("----------------------Start Training-------------------------")
        print((" - - Shape: Train_Data  ", np.shape(datas_train)))
        print((" - - Shape: Teach_Data  ", np.shape(datas_teach)))
        rp = 0
        all_mse = []
        mse = 10000
        while rp < n_repeat and mse >= error_accuracy:
            error_count = 0
            print(f"-------------Learning Time {rp}--------------")
            for p in range(len(datas_train)):
                # print('------------Learning Image: %d--------------'%p)
                data_train = np.asmatrix(datas_train[p])
                data_teach = np.asarray(datas_teach[p])
                data_focus1, data_conved1 = self.convolute(
                    data_train,
                    self.conv1,
                    self.w_conv1,
                    self.thre_conv1,
                    conv_step=self.step_conv1,
                )
                data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
                shape_featuremap1 = np.shape(data_conved1)
                data_bp_input = self._expand(data_pooled1)
                bp_out1 = data_bp_input

                bp_net_j = np.dot(bp_out1, self.vji.T) - self.thre_bp2
                bp_out2 = self.sig(bp_net_j)
                bp_net_k = np.dot(bp_out2, self.wkj.T) - self.thre_bp3
                bp_out3 = self.sig(bp_net_k)

                # --------------Model Learning ------------------------
                # calculate error and gradient---------------
                pd_k_all = np.multiply(
                    (data_teach - bp_out3), np.multiply(bp_out3, (1 - bp_out3))
                )
                pd_j_all = np.multiply(
                    np.dot(pd_k_all, self.wkj), np.multiply(bp_out2, (1 - bp_out2))
                )
                pd_i_all = np.dot(pd_j_all, self.vji)

                pd_conv1_pooled = pd_i_all / (self.size_pooling1 * self.size_pooling1)
                pd_conv1_pooled = pd_conv1_pooled.T.getA().tolist()
                pd_conv1_all = self._calculate_gradient_from_pool(
                    data_conved1,
                    pd_conv1_pooled,
                    shape_featuremap1[0],
                    shape_featuremap1[1],
                    self.size_pooling1,
                )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conv1[1]):
                    pd_conv_list = self._expand_mat(pd_conv1_all[k_conv])
                    delta_w = self.rate_weight * np.dot(pd_conv_list, data_focus1)
                    self.w_conv1[k_conv] = self.w_conv1[k_conv] + delta_w.reshape(
                        (self.conv1[0], self.conv1[0])
                    )
                    self.thre_conv1[k_conv] = (
                        self.thre_conv1[k_conv]
                        - np.sum(pd_conv1_all[k_conv]) * self.rate_thre
                    )
                # all connected layer
                self.wkj = self.wkj + pd_k_all.T * bp_out2 * self.rate_weight
                self.vji = self.vji + pd_j_all.T * bp_out1 * self.rate_weight
                self.thre_bp3 = self.thre_bp3 - pd_k_all * self.rate_thre
                self.thre_bp2 = self.thre_bp2 - pd_j_all * self.rate_thre
                # calculate the sum error of all single image
                errors = np.sum(abs(data_teach - bp_out3))
                error_count += errors
                # print('   ----Teach      ',data_teach)
                # print('   ----BP_output  ',bp_out3)
            rp = rp + 1
            mse = error_count / patterns
            all_mse.append(mse)

        def draw_error():
            yplot = [error_accuracy for i in range(int(n_repeat * 1.2))]
            plt.plot(all_mse, "+-")
            plt.plot(yplot, "r--")
            plt.xlabel("Learning Times")
            plt.ylabel("All_mse")
            plt.grid(True, alpha=0.5)
            plt.show()

        print("------------------Training Completed---------------------")
        print((" - - Training epoch: ", rp, f"     - - Mse: {mse:.6f}"))
        if draw_e:
            draw_error()
        return mse

    def predict(self, datas_test):
        # model predict
        produce_out = []
        print("-------------------Start Testing-------------------------")
        print((" - - Shape: Test_Data  ", np.shape(datas_test)))
        for p in range(len(datas_test)):
            data_test = np.asmatrix(datas_test[p])
            data_focus1, data_conved1 = self.convolute(
                data_test,
                self.conv1,
                self.w_conv1,
                self.thre_conv1,
                conv_step=self.step_conv1,
            )
            data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
            data_bp_input = self._expand(data_pooled1)

            bp_out1 = data_bp_input
            bp_net_j = bp_out1 * self.vji.T - self.thre_bp2
            bp_out2 = self.sig(bp_net_j)
            bp_net_k = bp_out2 * self.wkj.T - self.thre_bp3
            bp_out3 = self.sig(bp_net_k)
            produce_out.extend(bp_out3.getA().tolist())
        res = [list(map(self.do_round, each)) for each in produce_out]
        return np.asarray(res)

    def convolution(self, data):
        # return the data of image after convoluting process so we can check it out
        data_test = np.asmatrix(data)
        data_focus1, data_conved1 = self.convolute(
            data_test,
            self.conv1,
            self.w_conv1,
            self.thre_conv1,
            conv_step=self.step_conv1,
        )
        data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
        return data_conved1, data_pooled1
if __name__ == "__main__":
pass
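# Hedged usage sketch (added; argument order follows __init__ above, values are
# purely illustrative): conv1_get=[kernel, n_maps, step], then pooling size, then
# the three BP layer widths. bp_num1 must match the flattened pooled feature size
# for your input images.
# cnn = CNN([3, 2, 1], 2, 32, 8, 2)
# mse = cnn.train(patterns=2, datas_train=train_imgs, datas_teach=train_labels,
#                 n_repeat=10, error_accuracy=0.5, draw_e=False)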
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area


if __name__ == "__main__":

    def f(x):
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100_000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
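# Sanity check (added): because of the abs() above, the loop estimates the area
# between the curve and the x axis rather than the signed integral. Exactly:
# the integral of |x^3 + x^2| on [-5, 5] is 1376/12 + 2376/12 = 938/3 ≈ 312.666...,
# which the printed estimates approach as the step count grows.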
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
TRANSFORMERS_PATH = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
('''pretraining''', '''MODEL_FOR_PRETRAINING_MAPPING_NAMES''', '''AutoModelForPreTraining'''),
('''feature-extraction''', '''MODEL_MAPPING_NAMES''', '''AutoModel'''),
('''audio-classification''', '''MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForAudioClassification'''),
('''text-generation''', '''MODEL_FOR_CAUSAL_LM_MAPPING_NAMES''', '''AutoModelForCausalLM'''),
('''automatic-speech-recognition''', '''MODEL_FOR_CTC_MAPPING_NAMES''', '''AutoModelForCTC'''),
('''image-classification''', '''MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForImageClassification'''),
('''image-segmentation''', '''MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES''', '''AutoModelForImageSegmentation'''),
('''fill-mask''', '''MODEL_FOR_MASKED_LM_MAPPING_NAMES''', '''AutoModelForMaskedLM'''),
('''object-detection''', '''MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES''', '''AutoModelForObjectDetection'''),
(
'''zero-shot-object-detection''',
'''MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES''',
'''AutoModelForZeroShotObjectDetection''',
),
('''question-answering''', '''MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES''', '''AutoModelForQuestionAnswering'''),
('''text2text-generation''', '''MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES''', '''AutoModelForSeq2SeqLM'''),
('''text-classification''', '''MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForSequenceClassification'''),
('''automatic-speech-recognition''', '''MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES''', '''AutoModelForSpeechSeq2Seq'''),
(
'''table-question-answering''',
'''MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES''',
'''AutoModelForTableQuestionAnswering''',
),
('''token-classification''', '''MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForTokenClassification'''),
('''multiple-choice''', '''MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES''', '''AutoModelForMultipleChoice'''),
(
'''next-sentence-prediction''',
'''MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES''',
'''AutoModelForNextSentencePrediction''',
),
(
'''audio-frame-classification''',
'''MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES''',
'''AutoModelForAudioFrameClassification''',
),
('''audio-xvector''', '''MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES''', '''AutoModelForAudioXVector'''),
(
'''document-question-answering''',
'''MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES''',
'''AutoModelForDocumentQuestionAnswering''',
),
(
'''visual-question-answering''',
'''MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES''',
'''AutoModelForVisualQuestionAnswering''',
),
    ('''image-to-text''', '''MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES''', '''AutoModelForVision2Seq'''),
(
'''zero-shot-image-classification''',
'''MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES''',
'''AutoModelForZeroShotImageClassification''',
),
('''depth-estimation''', '''MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES''', '''AutoModelForDepthEstimation'''),
('''video-classification''', '''MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForVideoClassification'''),
('''mask-generation''', '''MODEL_FOR_MASK_GENERATION_MAPPING_NAMES''', '''AutoModelForMaskGeneration'''),
]
def camel_case_split(identifier):
    """Split a camel-cased name into its words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
def get_frameworks_table() -> pd.DataFrame:
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace("Config", ""): model_type for model_type, config in config_maping_names.items()
    }

    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
    all_models = list(all_models)
    all_models.sort()

    data = {"model_type": all_models}
    data["pytorch"] = [pt_models[t] for t in all_models]
    data["tensorflow"] = [tf_models[t] for t in all_models]
    data["flax"] = [flax_models[t] for t in all_models]

    # Now let's use the auto-mapping names to make sure
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = "AutoProcessor"
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = "AutoTokenizer"
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = "AutoFeatureExtractor"
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = "AutoTokenizer"

    data["processor"] = [processors[t] for t in all_models]

    return pd.DataFrame(data)
def update_pipeline_and_auto_class_table(table):
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, f"TF_{model_mapping}", f"FLAX_{model_mapping}"]
        auto_classes = [auto_class, f"TF_{auto_class}", f"Flax_{auto_class}"]
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules, auto_classes, model_mappings):
            # The type of pipeline may not exist in this framework
            if not hasattr(module, mapping):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module, mapping).values():
                if isinstance(name, str):
                    model_names.append(name)
                else:
                    model_names.extend(list(name))

            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names})

    return table
def update_metadata(token, commit_sha):
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table)

    resolved_tags_file = hf_hub_download(
        "huggingface/transformers-metadata", "pipeline_tags.json", repo_type="dataset", token=token
    )
    tags_dataset = Dataset.from_json(resolved_tags_file)
    table = {
        tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"])
        for i in range(len(tags_dataset))
    }
    table = update_pipeline_and_auto_class_table(table)

    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys())
    tags_table = pd.DataFrame(
        {
            "model_class": model_classes,
            "pipeline_tag": [table[m][0] for m in model_classes],
            "auto_class": [table[m][1] for m in model_classes],
        }
    )
    tags_dataset = Dataset.from_pandas(tags_table)

    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir, "frameworks.json"))
        tags_dataset.to_json(os.path.join(tmp_dir, "pipeline_tags.json"))

        if commit_sha is not None:
            commit_message = (
                f"Update with commit {commit_sha}\n\nSee: "
                f"https://github.com/huggingface/transformers/commit/{commit_sha}"
            )
        else:
            commit_message = "Update"

        upload_folder(
            repo_id="huggingface/transformers-metadata",
            folder_path=tmp_dir,
            repo_type="dataset",
            token=token,
            commit_message=commit_message,
        )
def check_pipeline_tags():
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]["pt"]
            if isinstance(model, (list, tuple)):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(key)

    if len(missing) > 0:
        msg = ", ".join(missing)
        raise ValueError(
            "The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside "
            f"`utils/update_metadata.py`: {msg}. Please add them!"
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--token''', type=str, help='''The token to use to push to the transformers-metadata dataset.''')
parser.add_argument('''--commit_sha''', type=str, help='''The sha of the commit going with this update.''')
parser.add_argument('''--check-only''', action='''store_true''', help='''Activate to just check all pipelines are present.''')
    args = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
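# Hedged usage note (added): intended to be run from the repo root, e.g.
#   python utils/update_metadata.py --token <hf_token> --commit_sha <git_sha>
# or `python utils/update_metadata.py --check-only` to just validate the tag table.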
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=2,
        num_choices=4,
        summary_type="last",
        use_proj=True,
        scope=None,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return XLMConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            num_labels=self.num_labels,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_xlm_model(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_xlm_lm_head(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_xlm_simple_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = XLMForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        outputs = model(input_ids)
        outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        result = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_xlm_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = XLMForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_xlm_sequence_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = XLMForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_xlm_token_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_xlm_for_multiple_choice(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            XLMModel,
            XLMWithLMHeadModel,
            XLMForQuestionAnswering,
            XLMForSequenceClassification,
            XLMForQuestionAnsweringSimple,
            XLMForTokenClassification,
            XLMForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": XLMModel,
            "fill-mask": XLMWithLMHeadModel,
            "question-answering": XLMForQuestionAnsweringSimple,
            "text-classification": XLMForSequenceClassification,
            "text-generation": XLMWithLMHeadModel,
            "token-classification": XLMForTokenClassification,
            "zero-shot": XLMForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)

    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)

    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)

    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)

    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)

    def test_xlm_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)

    def test_xlm_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)

    def _check_attentions_for_generate(
        self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions)
        )
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)

        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1

            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions)
            )

    def _check_hidden_states_for_generate(
        self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
            [True] * len(hidden_states),
        )
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)

        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
                [expected_shape] * len(iter_hidden_states),
            )

    @slow
    def test_model_from_pretrained(self):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
            14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447,
        ]  # the president the president the president ... (repeated ten times)
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
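# Hedged note (added): the file path below is assumed from the usual transformers
# test layout rather than stated in this file; these tests are typically run with
#   python -m pytest tests/models/xlm/test_modeling_xlm.py -k test_xlm_model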
from __future__ import annotations
def encode(plain: str) -> list[int]:
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))


if __name__ == "__main__":
    main()
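# Worked example (added for illustration): lowercase letters map to 1..26 via
# ord(c) - 96, and decode inverts this with chr(n + 96).
# encode("abc")      # -> [1, 2, 3]
# decode([1, 2, 3])  # -> 'abc'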
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
headers = {"UserAgent": UserAgent().random}


def extract_user_profile(script) -> dict:
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]


class InstagramUser:
    def __init__(self, username):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}('{self.username}')"

    def __str__(self) -> str:
        return f"{self.fullname} ({self.username}) is {self.biography}"

    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]


def test_instagram_user(username: str = "github") -> None:
    import os

    if os.environ.get("CI"):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
    assert instagram_user.username == username
    if username != "github":
        return
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 150
    assert instagram_user.number_of_followers > 120000
    assert instagram_user.number_of_followings > 15
    assert instagram_user.email == "support@github.com"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith("https://instagram.")
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    instagram_user = InstagramUser("github")
    print(instagram_user)
    print(f"{instagram_user.number_of_posts = }")
    print(f"{instagram_user.number_of_followers = }")
    print(f"{instagram_user.number_of_followings = }")
    print(f"{instagram_user.email = }")
    print(f"{instagram_user.website = }")
    print(f"{instagram_user.profile_picture_url = }")
    print(f"{instagram_user.is_verified = }")
    print(f"{instagram_user.is_private = }")
class A :
def __init__(self , lowerCAmelCase = "" , lowerCAmelCase = False ):
# Mapping from the first character of the prefix of the node
__lowercase= {}
# A node will be a leaf if the tree contains its word
__lowercase= is_leaf
__lowercase= prefix
def _A (self , lowerCAmelCase ):
__lowercase= 0
for q, w in zip(self.prefix , lowerCAmelCase ):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
    def insert_many(self, words: list[str]) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        # Case 1: If the word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)
        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # Case 3: The node prefix is equal to the matching
            # Solution: We insert the remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)
            # Case 4: The word is longer than or equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix

                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node

                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)
    def find(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)
    def delete(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes
                    return True
    def print_tree(self, height: int = 0) -> None:
        if self.prefix != "":
            print("-" * height, self.prefix, "  (leaf)" if self.is_leaf else "")
        for value in self.nodes.values():
            value.print_tree(height + 1)
def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)

    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def pytests() -> None:
    assert test_trie()


def main() -> None:
    """
    >>> pytests()
    """
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)

    print("Words:", words)
    print("Tree:")
    root.print_tree()


if __name__ == "__main__":
    main()
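
# Hedged illustration (added; not part of the original module). The node split
# in insert() is easiest to see with two words sharing a prefix: inserting
# "bandana" first creates a single leaf, and the shorter word "band" then
# splits it into a "band" node with an "ana" child.
#
#   demo = RadixNode()
#   demo.insert_many(["bandana", "band"])
#   demo.print_tree()
#   # prints roughly:
#   # - band   (leaf)
#   # -- ana   (leaf)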
| 304
|
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    """Checks if a matrix is Hermitian."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Returns the Rayleigh quotient of a Hermitian matrix a and vector v."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))
def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])

    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests()
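
# Hedged check (added for illustration): for a Hermitian matrix the Rayleigh
# quotient always lies between the smallest and largest eigenvalue, which is
# quick to verify with numpy:
#
#   m = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
#   vec = np.array([[1], [2], [3]])
#   low, high = np.linalg.eigvalsh(m)[[0, -1]]
#   assert low <= rayleigh_quotient(m, vec) <= high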
| 304
| 1
|
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester(object):
def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=2 , lowerCAmelCase=9_9 , lowerCAmelCase=0 , lowerCAmelCase=3_2 , lowerCAmelCase=5 , lowerCAmelCase=4 , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_1_2 , lowerCAmelCase=1_2 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=3 , lowerCAmelCase=4 , lowerCAmelCase="last" , lowerCAmelCase=None , lowerCAmelCase=None , ):
__lowercase= parent
__lowercase= batch_size
__lowercase= seq_length
__lowercase= is_training
__lowercase= use_input_lengths
__lowercase= use_token_type_ids
__lowercase= use_labels
__lowercase= gelu_activation
__lowercase= sinusoidal_embeddings
__lowercase= causal
__lowercase= asm
__lowercase= n_langs
__lowercase= vocab_size
__lowercase= n_special
__lowercase= hidden_size
__lowercase= num_hidden_layers
__lowercase= num_attention_heads
__lowercase= hidden_dropout_prob
__lowercase= attention_probs_dropout_prob
__lowercase= max_position_embeddings
__lowercase= type_vocab_size
__lowercase= type_sequence_label_size
__lowercase= initializer_range
__lowercase= num_labels
__lowercase= num_choices
__lowercase= summary_type
__lowercase= use_proj
__lowercase= scope
def _A (self ):
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase= random_attention_mask([self.batch_size, self.seq_length] )
__lowercase= None
if self.use_input_lengths:
__lowercase= (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__lowercase= None
if self.use_token_type_ids:
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__lowercase= None
__lowercase= None
__lowercase= None
if self.use_labels:
__lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase= ids_tensor([self.batch_size] , 2 ).float()
__lowercase= ids_tensor([self.batch_size] , self.num_choices )
__lowercase= self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _A (self ):
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= FlaubertModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , lengths=lowerCAmelCase , langs=lowerCAmelCase )
__lowercase= model(lowerCAmelCase , langs=lowerCAmelCase )
__lowercase= model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= FlaubertWithLMHeadModel(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= FlaubertForQuestionAnsweringSimple(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase )
__lowercase= model(lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= FlaubertForQuestionAnswering(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase )
__lowercase= model(
lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase , cls_index=lowerCAmelCase , is_impossible=lowerCAmelCase , p_mask=lowerCAmelCase , )
__lowercase= model(
lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase , cls_index=lowerCAmelCase , is_impossible=lowerCAmelCase , )
((__lowercase), )= result_with_labels.to_tuple()
__lowercase= model(lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase )
((__lowercase), )= result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= FlaubertForSequenceClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase )
__lowercase= model(lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= self.num_labels
__lowercase= FlaubertForTokenClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= self.num_choices
__lowercase= FlaubertForMultipleChoice(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase= token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase= input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase= model(
lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _A (self ):
__lowercase= self.prepare_config_and_inputs()
(
(
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
),
)= config_and_inputs
__lowercase= {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'lengths': input_lengths,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': FlaubertModel,
'''fill-mask''': FlaubertWithLMHeadModel,
'''question-answering''': FlaubertForQuestionAnsweringSimple,
'''text-classification''': FlaubertForSequenceClassification,
'''token-classification''': FlaubertForTokenClassification,
'''zero-shot''': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 304
|
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
lowerCAmelCase = logging.get_logger(__name__)
class A ( A_ ):
    model_input_names = ["audio_values", "audio_mask"]
def __init__(self , lowerCAmelCase=2_0_4_8 , lowerCAmelCase=1 , lowerCAmelCase=[1_6, 1_6] , lowerCAmelCase=1_2_8 , lowerCAmelCase=4_4_1_0_0 , lowerCAmelCase=8_6 , lowerCAmelCase=2_0_4_8 , lowerCAmelCase=0.0 , **lowerCAmelCase , ):
super().__init__(
feature_size=lowerCAmelCase , sampling_rate=lowerCAmelCase , padding_value=lowerCAmelCase , **lowerCAmelCase , )
__lowercase= spectrogram_length
__lowercase= num_channels
__lowercase= patch_size
__lowercase= feature_size // self.patch_size[1]
__lowercase= n_fft
__lowercase= sampling_rate // hop_length_to_sampling_rate
__lowercase= sampling_rate
__lowercase= padding_value
__lowercase= mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=lowerCAmelCase , min_frequency=0.0 , max_frequency=2_20_50.0 , sampling_rate=lowerCAmelCase , norm='slaney' , mel_scale='slaney' , ).T
def _A (self , lowerCAmelCase ):
__lowercase= spectrogram(
lowerCAmelCase , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='dB' , db_range=80.0 , )
__lowercase= log_spec[:, :-1]
__lowercase= log_spec - 20.0
__lowercase= np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__(self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = False , lowerCAmelCase = False , **lowerCAmelCase , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
'This feature extractor is set to support sampling rate'
f' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'
f' with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
__lowercase= isinstance(lowerCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
__lowercase= is_batched_numpy or (
isinstance(lowerCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__lowercase= [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(lowerCAmelCase , np.ndarray ):
__lowercase= np.asarray(lowerCAmelCase , dtype=np.floataa )
elif isinstance(lowerCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__lowercase= raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__lowercase= [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
__lowercase= [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , lowerCAmelCase ):
__lowercase= [np.asarray(lowerCAmelCase , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
__lowercase= max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
__lowercase= [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
__lowercase= np.array(lowerCAmelCase ).astype(np.floataa )
# convert into correct format for padding
__lowercase= max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
__lowercase= np.ones([len(lowerCAmelCase ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
__lowercase= padded_audio_features * self.padding_value
for i in range(len(lowerCAmelCase ) ):
__lowercase= audio_features[i]
__lowercase= feature
# return as BatchFeature
if return_attention_mask:
__lowercase= {'audio_values': padded_audio_features, 'audio_mask': audio_mask}
else:
__lowercase= {'audio_values': padded_audio_features}
__lowercase= BatchFeature(data=lowerCAmelCase , tensor_type=lowerCAmelCase )
return encoded_inputs
| 304
| 1
|
def hamming_distance(string1: str, string2: str) -> int:
    """
    Calculate the Hamming distance between two equal-length strings.

    >>> hamming_distance("python", "python")
    0
    >>> hamming_distance("karolin", "kathrin")
    3
    """
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")

    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1

    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
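
# Hedged aside (added; not in the original file): the same count can be written
# as a single generator expression once the length check has passed:
#
#   def hamming_distance_oneliner(string1: str, string2: str) -> int:
#       if len(string1) != len(string2):
#           raise ValueError("String lengths must match!")
#       return sum(c1 != c2 for c1, c2 in zip(string1, string2))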
| 304
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # We need to pad the tensor with one more element if we are the main process
    # so that the lengths actually differ across processes
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")
    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)


if __name__ == "__main__":
    main()
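
# Hedged usage note (added; the script filename below is illustrative): these
# collective-ops checks only exercise anything interesting when several
# processes run the file together, e.g.
#
#   accelerate launch --num_processes 2 test_ops.py
#
# On a single process, gather/broadcast/reduce are effectively identity ops.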
| 304
| 1
|
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class A ( unittest.TestCase ):
    tokenizer_class = JukeboxTokenizer
    metas = {
'''artist''': '''Zac Brown Band''',
'''genres''': '''Country''',
'''lyrics''': '''I met a traveller from an antique land,
Who said "Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
''',
}
@require_torch
def _A (self ):
import torch
__lowercase= JukeboxTokenizer.from_pretrained('openai/jukebox-1b-lyrics' )
__lowercase= tokenizer(**self.metas )['input_ids']
# fmt: off
__lowercase= [
torch.tensor([[
0, 0, 0, 7_1_6_9, 5_0_7, 9, 7_6, 3_9, 3_1, 4_6, 7_6, 2_7,
7_6, 4_6, 4_4, 2_7, 4_8, 3_1, 3_8, 3_8, 3_1, 4_4, 7_6, 3_2,
4_4, 4_1, 3_9, 7_6, 2_7, 4_0, 7_6, 2_7, 4_0, 4_6, 3_5, 4_3,
4_7, 3_1, 7_6, 3_8, 2_7, 4_0, 3_0, 6_4, 7_8, 7_6, 7_6, 7_6,
7_6, 7_6, 7_6, 7_6, 7_6, 2_3, 3_4, 4_1, 7_6, 4_5, 2_7, 3_5,
3_0, 7_6, 7_1, 2_0, 4_9, 4_1, 7_6, 4_8, 2_7, 4_5, 4_6, 7_6,
2_7, 4_0, 3_0, 7_6, 4_6, 4_4, 4_7, 4_0, 3_7, 3_8, 3_1, 4_5,
4_5, 7_6, 3_8, 3_1, 3_3, 4_5, 7_6, 4_1, 3_2, 7_6, 4_5, 4_6,
4_1, 4_0, 3_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
1_9, 4_6, 2_7, 4_0, 3_0, 7_6, 3_5, 4_0, 7_6, 4_6, 3_4, 3_1,
7_6, 3_0, 3_1, 4_5, 3_1, 4_4, 4_6, 6_3, 7_6, 6_3, 7_6, 6_3,
7_6, 6_3, 7_6, 1_4, 3_1, 2_7, 4_4, 7_6, 4_6, 3_4, 3_1, 3_9,
6_4, 7_6, 4_1, 4_0, 7_6, 4_6, 3_4, 3_1, 7_6, 4_5, 2_7, 4_0,
3_0, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 8,
2_7, 3_8, 3_2, 7_6, 4_5, 4_7, 4_0, 3_7, 7_6, 2_7, 7_6, 4_5,
3_4, 2_7, 4_6, 4_6, 3_1, 4_4, 3_1, 3_0, 7_6, 4_8, 3_5, 4_5,
2_7, 3_3, 3_1, 7_6, 3_8, 3_5, 3_1, 4_5, 6_4, 7_6, 4_9, 3_4,
4_1, 4_5, 3_1, 7_6, 3_2, 4_4, 4_1, 4_9, 4_0, 6_4, 7_8, 7_6,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1, 4_0, 3_0, 7_6, 4_9,
4_4, 3_5, 4_0, 3_7, 3_8, 3_1, 3_0, 7_6, 3_8, 3_5, 4_2, 6_4,
7_6, 2_7, 4_0, 3_0, 7_6, 4_5, 4_0, 3_1, 3_1, 4_4, 7_6, 4_1,
3_2, 7_6, 2_9, 4_1, 3_8, 3_0, 7_6, 2_9, 4_1, 3_9, 3_9, 2_7,
4_0, 3_0, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
2_0, 3_1, 3_8, 3_8, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_5, 4_6,
4_5, 7_6, 4_5, 2_9, 4_7, 3_8, 4_2, 4_6, 4_1, 4_4, 7_6, 4_9,
3_1, 3_8, 3_8, 7_6, 4_6, 3_4, 4_1, 4_5, 3_1, 7_6, 4_2, 2_7,
4_5, 4_5, 3_5, 4_1, 4_0, 4_5, 7_6, 4_4, 3_1, 2_7, 3_0, 7_8,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_3, 3_4, 3_5, 2_9,
3_4, 7_6, 5_1, 3_1, 4_6, 7_6, 4_5, 4_7, 4_4, 4_8, 3_5, 4_8,
3_1, 6_4, 7_6, 4_5, 4_6, 2_7, 3_9, 4_2, 3_1, 3_0, 7_6, 4_1,
4_0, 7_6, 4_6, 3_4, 3_1, 4_5, 3_1, 7_6, 3_8, 3_5, 3_2, 3_1,
3_8, 3_1, 4_5, 4_5, 7_6, 4_6, 3_4, 3_5, 4_0, 3_3, 4_5, 6_4,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_0, 3_4, 3_1,
7_6, 3_4, 2_7, 4_0, 3_0, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_9,
4_1, 2_9, 3_7, 3_1, 3_0, 7_6, 4_6, 3_4, 3_1, 3_9, 6_4, 7_6,
2_7, 4_0, 3_0, 7_6, 4_6, 3_4, 3_1, 7_6, 3_4, 3_1, 2_7, 4_4,
4_6, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_2, 3_1, 3_0, 6_6, 7_8,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1, 4_0, 3_0, 7_6,
4_1, 4_0, 7_6, 4_6, 3_4, 3_1, 7_6, 4_2, 3_1, 3_0, 3_1, 4_5,
4_6, 2_7, 3_8, 6_4, 7_6, 4_6, 3_4, 3_1, 4_5, 3_1, 7_6, 4_9,
4_1, 4_4, 3_0, 4_5, 7_6, 2_7, 4_2, 4_2, 3_1, 2_7, 4_4, 6_5,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_3, 5_1, 7_6,
4_0, 2_7, 3_9, 3_1, 7_6, 3_5, 4_5, 7_6, 1_5, 5_2, 5_1, 3_9,
2_7, 4_0, 3_0, 3_5, 2_7, 4_5, 6_4, 7_6, 1_1, 3_5, 4_0, 3_3,
7_6, 4_1, 3_2, 7_6, 1_1, 3_5, 4_0, 3_3, 4_5, 6_6, 7_8, 7_6,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_2, 4_1, 4_1, 3_7, 7_6,
4_1, 4_0, 7_6, 3_9, 5_1, 7_6, 2_3, 4_1, 4_4, 3_7, 4_5, 6_4,
7_6, 5_1, 3_1, 7_6, 1_3, 3_5, 3_3, 3_4, 4_6, 5_1, 6_4, 7_6,
2_7, 4_0, 3_0, 7_6, 3_0, 3_1, 4_5, 4_2, 2_7, 3_5, 4_4, 6_7,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_4, 4_1, 4_6,
3_4, 3_5, 4_0, 3_3, 7_6, 2_8, 3_1, 4_5, 3_5, 3_0, 3_1, 7_6,
4_4, 3_1, 3_9, 2_7, 3_5, 4_0, 4_5, 6_3, 7_6, 1_8, 4_1, 4_7,
4_0, 3_0, 7_6, 4_6, 3_4, 3_1, 7_6, 3_0, 3_1, 2_9, 2_7, 5_1,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_5, 3_2, 7_6,
4_6, 3_4, 2_7, 4_6, 7_6, 2_9, 4_1, 3_8, 4_1, 4_5, 4_5, 2_7,
3_8, 7_6, 2_3, 4_4, 3_1, 2_9, 3_7, 6_4, 7_6, 2_8, 4_1, 4_7,
4_0, 3_0, 3_8, 3_1, 4_5, 4_5, 7_6, 2_7, 4_0, 3_0, 7_6, 2_8,
2_7, 4_4, 3_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
2_0, 3_4, 3_1, 7_6, 3_8, 4_1, 4_0, 3_1, 7_6, 2_7, 4_0, 3_0,
7_6, 3_8, 3_1, 4_8, 3_1, 3_8, 7_6, 4_5, 2_7, 4_0, 3_0, 4_5,
7_6, 4_5, 4_6, 4_4, 3_1, 4_6, 2_9, 3_4, 7_6, 3_2, 2_7, 4_4,
7_6, 2_7, 4_9, 2_7, 5_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
7_6, 7_6]] ),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1]] ),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
def _A (self ):
import torch
__lowercase= JukeboxTokenizer.from_pretrained('openai/jukebox-5b-lyrics' )
__lowercase= tokenizer(**self.metas )['input_ids']
# fmt: off
__lowercase= [
torch.tensor([[
0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1, 9, 7_7, 3_9,
3_1, 4_6, 7_7, 2_7, 7_7, 4_6, 4_4, 2_7, 4_8, 3_1, 3_8, 3_8,
3_1, 4_4, 7_7, 3_2, 4_4, 4_1, 3_9, 7_7, 2_7, 4_0, 7_7, 2_7,
4_0, 4_6, 3_5, 4_3, 4_7, 3_1, 7_7, 3_8, 2_7, 4_0, 3_0, 6_4,
7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 2_3, 3_4, 4_1,
7_7, 4_5, 2_7, 3_5, 3_0, 7_7, 7_2, 2_0, 4_9, 4_1, 7_7, 4_8,
2_7, 4_5, 4_6, 7_7, 2_7, 4_0, 3_0, 7_7, 4_6, 4_4, 4_7, 4_0,
3_7, 3_8, 3_1, 4_5, 4_5, 7_7, 3_8, 3_1, 3_3, 4_5, 7_7, 4_1,
3_2, 7_7, 4_5, 4_6, 4_1, 4_0, 3_1, 7_9, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 1_9, 4_6, 2_7, 4_0, 3_0, 7_7, 3_5, 4_0,
7_7, 4_6, 3_4, 3_1, 7_7, 3_0, 3_1, 4_5, 3_1, 4_4, 4_6, 6_3,
7_7, 6_3, 7_7, 6_3, 7_7, 6_3, 7_7, 1_4, 3_1, 2_7, 4_4, 7_7,
4_6, 3_4, 3_1, 3_9, 6_4, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1,
7_7, 4_5, 2_7, 4_0, 3_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 8, 2_7, 3_8, 3_2, 7_7, 4_5, 4_7, 4_0, 3_7,
7_7, 2_7, 7_7, 4_5, 3_4, 2_7, 4_6, 4_6, 3_1, 4_4, 3_1, 3_0,
7_7, 4_8, 3_5, 4_5, 2_7, 3_3, 3_1, 7_7, 3_8, 3_5, 3_1, 4_5,
6_4, 7_7, 4_9, 3_4, 4_1, 4_5, 3_1, 7_7, 3_2, 4_4, 4_1, 4_9,
4_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1,
4_0, 3_0, 7_7, 4_9, 4_4, 3_5, 4_0, 3_7, 3_8, 3_1, 3_0, 7_7,
3_8, 3_5, 4_2, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 4_5, 4_0, 3_1,
3_1, 4_4, 7_7, 4_1, 3_2, 7_7, 2_9, 4_1, 3_8, 3_0, 7_7, 2_9,
4_1, 3_9, 3_9, 2_7, 4_0, 3_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 2_0, 3_1, 3_8, 3_8, 7_7, 4_6, 3_4, 2_7,
4_6, 7_7, 3_5, 4_6, 4_5, 7_7, 4_5, 2_9, 4_7, 3_8, 4_2, 4_6,
4_1, 4_4, 7_7, 4_9, 3_1, 3_8, 3_8, 7_7, 4_6, 3_4, 4_1, 4_5,
3_1, 7_7, 4_2, 2_7, 4_5, 4_5, 3_5, 4_1, 4_0, 4_5, 7_7, 4_4,
3_1, 2_7, 3_0, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
2_3, 3_4, 3_5, 2_9, 3_4, 7_7, 5_1, 3_1, 4_6, 7_7, 4_5, 4_7,
4_4, 4_8, 3_5, 4_8, 3_1, 6_4, 7_7, 4_5, 4_6, 2_7, 3_9, 4_2,
3_1, 3_0, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1, 4_5, 3_1, 7_7,
3_8, 3_5, 3_2, 3_1, 3_8, 3_1, 4_5, 4_5, 7_7, 4_6, 3_4, 3_5,
4_0, 3_3, 4_5, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 2_0, 3_4, 3_1, 7_7, 3_4, 2_7, 4_0, 3_0, 7_7, 4_6, 3_4,
2_7, 4_6, 7_7, 3_9, 4_1, 2_9, 3_7, 3_1, 3_0, 7_7, 4_6, 3_4,
3_1, 3_9, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 4_6, 3_4, 3_1, 7_7,
3_4, 3_1, 2_7, 4_4, 4_6, 7_7, 4_6, 3_4, 2_7, 4_6, 7_7, 3_2,
3_1, 3_0, 6_6, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
1, 4_0, 3_0, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1, 7_7, 4_2,
3_1, 3_0, 3_1, 4_5, 4_6, 2_7, 3_8, 6_4, 7_7, 4_6, 3_4, 3_1,
4_5, 3_1, 7_7, 4_9, 4_1, 4_4, 3_0, 4_5, 7_7, 2_7, 4_2, 4_2,
3_1, 2_7, 4_4, 6_5, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 1_3, 5_1, 7_7, 4_0, 2_7, 3_9, 3_1, 7_7, 3_5, 4_5, 7_7,
1_5, 5_2, 5_1, 3_9, 2_7, 4_0, 3_0, 3_5, 2_7, 4_5, 6_4, 7_7,
1_1, 3_5, 4_0, 3_3, 7_7, 4_1, 3_2, 7_7, 1_1, 3_5, 4_0, 3_3,
4_5, 6_6, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1_2,
4_1, 4_1, 3_7, 7_7, 4_1, 4_0, 7_7, 3_9, 5_1, 7_7, 2_3, 4_1,
4_4, 3_7, 4_5, 6_4, 7_7, 5_1, 3_1, 7_7, 1_3, 3_5, 3_3, 3_4,
4_6, 5_1, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 3_0, 3_1, 4_5, 4_2,
2_7, 3_5, 4_4, 6_7, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 1_4, 4_1, 4_6, 3_4, 3_5, 4_0, 3_3, 7_7, 2_8, 3_1, 4_5,
3_5, 3_0, 3_1, 7_7, 4_4, 3_1, 3_9, 2_7, 3_5, 4_0, 4_5, 6_3,
7_7, 1_8, 4_1, 4_7, 4_0, 3_0, 7_7, 4_6, 3_4, 3_1, 7_7, 3_0,
3_1, 2_9, 2_7, 5_1, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 1_5, 3_2, 7_7, 4_6, 3_4, 2_7, 4_6, 7_7, 2_9, 4_1, 3_8,
4_1, 4_5, 4_5, 2_7, 3_8, 7_7, 2_3, 4_4, 3_1, 2_9, 3_7, 6_4,
7_7, 2_8, 4_1, 4_7, 4_0, 3_0, 3_8, 3_1, 4_5, 4_5, 7_7, 2_7,
4_0, 3_0, 7_7, 2_8, 2_7, 4_4, 3_1, 7_9, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 2_0, 3_4, 3_1, 7_7, 3_8, 4_1, 4_0, 3_1,
7_7, 2_7, 4_0, 3_0, 7_7, 3_8, 3_1, 4_8, 3_1, 3_8, 7_7, 4_5,
2_7, 4_0, 3_0, 4_5, 7_7, 4_5, 4_6, 4_4, 3_1, 4_6, 2_9, 3_4,
7_7, 3_2, 2_7, 4_4, 7_7, 2_7, 4_9, 2_7, 5_1, 7_9, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 7_7, 7_7]] ),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 304
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    """Output class for the ScoreSdeVeScheduler's step functions."""

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    order = 1
@register_to_config
def __init__(self , lowerCAmelCase = 2_0_0_0 , lowerCAmelCase = 0.15 , lowerCAmelCase = 0.01 , lowerCAmelCase = 13_48.0 , lowerCAmelCase = 1E-5 , lowerCAmelCase = 1 , ):
# standard deviation of the initial noise distribution
__lowercase= sigma_max
# setable values
__lowercase= None
self.set_sigmas(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase = None ):
return sample
def _A (self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None ):
__lowercase= sampling_eps if sampling_eps is not None else self.config.sampling_eps
__lowercase= torch.linspace(1 , lowerCAmelCase , lowerCAmelCase , device=lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None ):
__lowercase= sigma_min if sigma_min is not None else self.config.sigma_min
__lowercase= sigma_max if sigma_max is not None else self.config.sigma_max
__lowercase= sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(lowerCAmelCase , lowerCAmelCase )
__lowercase= sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
__lowercase= torch.exp(torch.linspace(math.log(lowerCAmelCase ) , math.log(lowerCAmelCase ) , lowerCAmelCase ) )
__lowercase= torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def _A (self , lowerCAmelCase , lowerCAmelCase ):
return torch.where(
timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = True , ):
if self.timesteps is None:
raise ValueError(
'`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )
__lowercase= timestep * torch.ones(
sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
__lowercase= (timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
__lowercase= timesteps.to(self.discrete_sigmas.device )
__lowercase= self.discrete_sigmas[timesteps].to(sample.device )
__lowercase= self.get_adjacent_sigma(lowerCAmelCase , lowerCAmelCase ).to(sample.device )
__lowercase= torch.zeros_like(lowerCAmelCase )
__lowercase= (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
__lowercase= diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
__lowercase= diffusion.unsqueeze(-1 )
__lowercase= drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
__lowercase= randn_tensor(
sample.shape , layout=sample.layout , generator=lowerCAmelCase , device=sample.device , dtype=sample.dtype )
__lowercase= sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
__lowercase= prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=lowerCAmelCase , prev_sample_mean=lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = True , ):
if self.timesteps is None:
raise ValueError(
'`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
__lowercase= randn_tensor(sample.shape , layout=sample.layout , generator=lowerCAmelCase ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
__lowercase= torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
__lowercase= torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
__lowercase= (self.config.snr * noise_norm / grad_norm) ** 2 * 2
__lowercase= step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
__lowercase= step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
__lowercase= step_size.unsqueeze(-1 )
__lowercase= sample + step_size * model_output
__lowercase= prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
__lowercase= timesteps.to(original_samples.device )
__lowercase= self.discrete_sigmas.to(original_samples.device )[timesteps]
__lowercase= (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(lowerCAmelCase ) * sigmas[:, None, None, None]
)
__lowercase= noise + original_samples
return noisy_samples
def __len__(self ):
return self.config.num_train_timesteps
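
# Hedged sketch (added for illustration; `step_pred`, `step_correct` and
# `config.correct_steps` are the public ScoreSdeVeScheduler names that the
# anonymized methods above correspond to). A sampling loop alternates the
# corrector and the predictor at every timestep, roughly:
#
#   for t in scheduler.timesteps:
#       for _ in range(scheduler.config.correct_steps):
#           model_output = model(sample, t).sample
#           sample = scheduler.step_correct(model_output, sample).prev_sample
#       output = scheduler.step_pred(model(sample, t).sample, t, sample)
#       sample, sample_mean = output.prev_sample, output.prev_sample_mean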
| 304
| 1
|
from importlib import import_module
from .logging import get_logger
lowerCAmelCase = get_logger(__name__)
class _PatchedModuleObj:
    """Set all the modules components as attributes of the _PatchedModuleObj object."""

    def __init__(self, module, attrs=None):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("__"):
                    setattr(self, key, getattr(module, key))
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module
class patch_submodule:
    """Patch a submodule attribute of an object, keeping all other submodules intact at all levels."""

    _active_patches = []

    def __init__(self, obj, target: str, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(".")[0]
        self.original = {}
        self.attrs = attrs or []
    def __enter__(self):
        *submodules, target_attr = self.target.split(".")
        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module(".".join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    (isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule)
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)
        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")
    def __exit__(self, *exc_info):
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        """Activate a patch."""
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        """Stop an active patch."""
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
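
# Hedged usage sketch (added; `some_module` is a hypothetical module that did
# `import os` at its top level):
#
#   with patch_submodule(some_module, "os.path.join", lambda *a: "/patched"):
#       some_module.os.path.join("a", "b")  # -> "/patched"
#
# start()/stop() offer the same behaviour without a `with` block.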
| 304
|
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
lowerCAmelCase = False
class A ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class A ( unittest.TestCase ):
def _A (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _A (self ):
__lowercase= VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
__lowercase= load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
__lowercase= torch.manual_seed(0 )
__lowercase= pipe.dual_guided(
prompt='first prompt' , image=lowerCAmelCase , text_to_image_strength=0.75 , generator=lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCAmelCase )
__lowercase= VersatileDiffusionPipeline.from_pretrained(lowerCAmelCase , torch_dtype=torch.floataa )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
__lowercase= generator.manual_seed(0 )
__lowercase= pipe.dual_guided(
prompt='first prompt' , image=lowerCAmelCase , text_to_image_strength=0.75 , generator=lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def _A (self ):
__lowercase= VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
__lowercase= 'cyberpunk 2077'
__lowercase= load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
__lowercase= torch.manual_seed(0 )
__lowercase= pipe.dual_guided(
prompt=lowerCAmelCase , image=lowerCAmelCase , text_to_image_strength=0.75 , generator=lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='numpy' , ).images
__lowercase= image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase= np.array([0.14_48, 0.16_19, 0.17_41, 0.10_86, 0.11_47, 0.11_28, 0.11_99, 0.11_65, 0.10_01] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
__lowercase= 'A painting of a squirrel eating a burger '
__lowercase= torch.manual_seed(0 )
__lowercase= pipe.text_to_image(
prompt=lowerCAmelCase , generator=lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='numpy' ).images
__lowercase= image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase= np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
__lowercase= pipe.image_variation(lowerCAmelCase , generator=lowerCAmelCase , output_type='numpy' ).images
__lowercase= image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase= np.array([0.30_76, 0.31_23, 0.32_84, 0.37_82, 0.37_70, 0.38_94, 0.42_97, 0.43_31, 0.44_56] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 304
| 1
|
import numpy as np

from transformers import Pipeline


def softmax(outputs):
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
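
# Note added for clarity: subtracting the row-wise max before exponentiating
# leaves the result unchanged (the factor cancels in the ratio) but prevents
# np.exp from overflowing on large logits, the standard stable-softmax trick.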
class PairClassificationPipeline(Pipeline):
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)

        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
| 304
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_xmod": [
        "XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XmodConfig",
        "XmodOnnxConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xmod"] = [
        "XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XmodForCausalLM",
        "XmodForMaskedLM",
        "XmodForMultipleChoice",
        "XmodForQuestionAnswering",
        "XmodForSequenceClassification",
        "XmodForTokenClassification",
        "XmodModel",
        "XmodPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 304
| 1
|
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
lowerCAmelCase = {
'''gwf-440k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/gwf-440k.ckpt''',
'''sample_rate''': 4_8_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
'''jmann-small-190k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt''',
'''sample_rate''': 4_8_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
'''jmann-large-580k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt''',
'''sample_rate''': 4_8_0_0_0,
'''sample_size''': 1_3_1_0_7_2,
},
'''maestro-uncond-150k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt''',
'''sample_rate''': 1_6_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
'''unlocked-uncond-250k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt''',
'''sample_rate''': 1_6_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
'''honk-140k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/honk-140k.ckpt''',
'''sample_rate''': 1_6_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
}
def alpha_sigma_to_t(alpha, sigma):
    """Returns a timestep, given the scaling factors for the clean image and for the noise."""
    return torch.atan2(sigma, alpha) / math.pi * 2


def get_crash_schedule(t):
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)
class Object(object):
    pass


class DiffusionUncond(nn.Module):
    def __init__(self, global_args):
        super().__init__()

        self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)
def download(model_name):
    url = MODELS_MAP[model_name]["url"]
    os.system(f"wget {url} ./")

    return f"./{model_name}.ckpt"
lowerCAmelCase = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
}
lowerCAmelCase = {
'''8''': '''resnets.0''',
'''9''': '''attentions.0''',
'''10''': '''resnets.1''',
'''11''': '''attentions.1''',
'''12''': '''resnets.2''',
'''13''': '''attentions.2''',
}
lowerCAmelCase = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
'''8''': '''resnets.3''',
'''9''': '''attentions.3''',
'''10''': '''resnets.4''',
'''11''': '''attentions.4''',
'''12''': '''resnets.5''',
'''13''': '''attentions.5''',
}
lowerCAmelCase = {
'''0''': '''resnets.0''',
'''1''': '''resnets.1''',
'''2''': '''resnets.2''',
'''4''': '''resnets.0''',
'''5''': '''resnets.1''',
'''6''': '''resnets.2''',
}
lowerCAmelCase = {
'''skip''': '''conv_skip''',
'''main.0''': '''conv_1''',
'''main.1''': '''group_norm_1''',
'''main.3''': '''conv_2''',
'''main.4''': '''group_norm_2''',
}
lowerCAmelCase = {
'''norm''': '''group_norm''',
'''qkv_proj''': ['''query''', '''key''', '''value'''],
'''out_proj''': ['''proj_attn'''],
}
def convert_resconv_naming(name):
    if name.startswith("skip"):
        return name.replace("skip", RES_CONV_MAP["skip"])

    # name has to be of format main.{digit}
    if not name.startswith("main."):
        raise ValueError(f"ResConvBlock error with {name}")

    return name.replace(name[:6], RES_CONV_MAP[name[:6]])


def convert_attn_naming(name):
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f"Attn error with {name}")
def rename(input_string, max_depth=13):
    """Translate one original checkpoint key into the corresponding diffusers key (or list of keys)."""
    string = input_string
    if string.split(".")[0] == "timestep_embed":
        return string.replace("timestep_embed", "time_proj")
    depth = 0
    if string.startswith("net.3."):
        depth += 1
        string = string[6:]
    elif string.startswith("net."):
        string = string[4:]

    while string.startswith("main.7."):
        depth += 1
        string = string[7:]

    if string.startswith("main."):
        string = string[5:]

    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]

    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"down_blocks.{depth}"
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - depth - 1}"
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - 1}" if int(layer_num) > 3 else "down_blocks.0"

    if not string_left.startswith("."):
        raise ValueError(f"Naming error with {input_string} and string_left: {string_left}.")

    string_left = string_left[1:]

    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        new_string_left = convert_attn_naming(string_left)
        string_left = new_string_left

    if not isinstance(string_left, list):
        new_string = prefix + "." + new_layer + "." + string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string
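# For example, rename("timestep_embed.weight") returns "time_proj.weight", while
# fused attention keys come back as a list that transform_conv_attns() splits below.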
def rename_orig_weights(state_dict):
    """Build a new state dict with diffusers-style keys, splitting fused qkv tensors where needed."""
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel"):
            # up- and downsample layers don't have trainable weights
            continue
        new_k = rename(k)
        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v
    return new_state_dict
def transform_conv_attns(new_state_dict, new_k, v):
    """Copy an attention tensor into the state dict, splitting fused qkv weights into thirds."""
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        tripled_shape = v.shape[0]
        single_shape = tripled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
def main(args):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model_name = args.model_path.split("/")[-1].split(".")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
        args.model_path = download(model_name)

    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]

    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0

    diffusers_model = UNetaDModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()

    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["state_dict"])
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)

    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())

    assert len(renamed_minus_diffusers) == 0, f"Problem with {renamed_minus_diffusers}"
    assert all(k.endswith("kernel") for k in list(diffusers_minus_renamed)), f"Problem with {diffusers_minus_renamed}"

    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value

    diffusers_model.load_state_dict(diffusers_state_dict)

    steps = 100
    seed = 33

    diffusers_scheduler = IPNDMScheduler(num_train_timesteps=steps)

    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)

    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)

    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=diffusers_scheduler)

    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios

    generated = sampling.iplms_sample(orig_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)

    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()

    if args.save:
        pipe.save_pretrained(args.checkpoint_path)

    print("Diff sum", diff_sum)
    print("Diff max", diff_max)

    assert diff_max < 1e-3, f"Diff max: {diff_max} is too much :-/"

    print(f"Conversion for {model_name} successful!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    parser.add_argument(
        "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
    )
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()

    main(args)
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    """Calculate the date of Easter for a given year using Gauss's algorithm."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday))
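# Spot checks against the known calendar (Western Easter):
#     gauss_easter(2000) == datetime(2000, 4, 23)
#     gauss_easter(2023) == datetime(2023, 4, 9)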
if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_funnel''': ['''FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FunnelConfig'''],
'''convert_funnel_original_tf_checkpoint_to_pytorch''': [],
'''tokenization_funnel''': ['''FunnelTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_funnel"] = [
'''FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FunnelBaseModel''',
'''FunnelForMaskedLM''',
'''FunnelForMultipleChoice''',
'''FunnelForPreTraining''',
'''FunnelForQuestionAnswering''',
'''FunnelForSequenceClassification''',
'''FunnelForTokenClassification''',
'''FunnelModel''',
'''FunnelPreTrainedModel''',
'''load_tf_weights_in_funnel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_funnel"] = [
'''TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFFunnelBaseModel''',
'''TFFunnelForMaskedLM''',
'''TFFunnelForMultipleChoice''',
'''TFFunnelForPreTraining''',
'''TFFunnelForQuestionAnswering''',
'''TFFunnelForSequenceClassification''',
'''TFFunnelForTokenClassification''',
'''TFFunnelModel''',
'''TFFunnelPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(self, vocab_size=50265, max_position_embeddings=512, encoder_layers=8, encoder_ffn_dim=2048, encoder_attention_heads=16, decoder_layers=8, decoder_ffn_dim=2048, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=1, scale_embedding=False, pad_token_id=0, bos_token_id=1, eos_token_id=2, forced_eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs)
class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )

            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs
@property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )

        return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from math import factorial, radians
def maclaurin_sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    """Approximate sin(angle_in_degrees) with `accuracy` terms of the Maclaurin series."""
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result, rounded_values_count)
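# Quick checks (the function name `maclaurin_sin` is this file's only binding;
# no external caller depends on it): maclaurin_sin(30) -> 0.5 and
# maclaurin_sin(90) -> 1.0 once the series is rounded to 10 decimal places.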
if __name__ == "__main__":
__import__('''doctest''').testmod()
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_biogpt''': ['''BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BioGptConfig'''],
'''tokenization_biogpt''': ['''BioGptTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_biogpt"] = [
'''BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BioGptForCausalLM''',
'''BioGptForTokenClassification''',
'''BioGptForSequenceClassification''',
'''BioGptModel''',
'''BioGptPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
lowerCAmelCase = [
9_9_9,
8_0_0,
7_9_9,
6_0_0,
5_9_9,
5_0_0,
4_0_0,
3_9_9,
3_7_7,
3_5_5,
3_3_3,
3_1_1,
2_8_8,
2_6_6,
2_4_4,
2_2_2,
2_0_0,
1_9_9,
1_7_7,
1_5_5,
1_3_3,
1_1_1,
8_8,
6_6,
4_4,
2_2,
0,
]
lowerCAmelCase = [
9_9_9,
9_7_6,
9_5_2,
9_2_8,
9_0_5,
8_8_2,
8_5_8,
8_5_7,
8_1_0,
7_6_2,
7_1_5,
7_1_4,
5_7_2,
4_2_9,
4_2_8,
2_8_6,
2_8_5,
2_3_8,
1_9_0,
1_4_3,
1_4_2,
1_1_8,
9_5,
7_1,
4_7,
2_4,
0,
]
lowerCAmelCase = [
9_9_9,
9_8_8,
9_7_7,
9_6_6,
9_5_5,
9_4_4,
9_3_3,
9_2_2,
9_1_1,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_5_0,
3_0_0,
2_9_9,
2_6_6,
2_3_3,
2_0_0,
1_9_9,
1_7_9,
1_5_9,
1_4_0,
1_2_0,
1_0_0,
9_9,
8_8,
7_7,
6_6,
5_5,
4_4,
3_3,
2_2,
1_1,
0,
]
lowerCAmelCase = [
9_9_9,
9_9_5,
9_9_2,
9_8_9,
9_8_5,
9_8_1,
9_7_8,
9_7_5,
9_7_1,
9_6_7,
9_6_4,
9_6_1,
9_5_7,
9_5_6,
9_5_1,
9_4_7,
9_4_2,
9_3_7,
9_3_3,
9_2_8,
9_2_3,
9_1_9,
9_1_4,
9_1_3,
9_0_8,
9_0_3,
8_9_7,
8_9_2,
8_8_7,
8_8_1,
8_7_6,
8_7_1,
8_7_0,
8_6_4,
8_5_8,
8_5_2,
8_4_6,
8_4_0,
8_3_4,
8_2_8,
8_2_7,
8_2_0,
8_1_3,
8_0_6,
7_9_9,
7_9_2,
7_8_5,
7_8_4,
7_7_7,
7_7_0,
7_6_3,
7_5_6,
7_4_9,
7_4_2,
7_4_1,
7_3_3,
7_2_4,
7_1_6,
7_0_7,
6_9_9,
6_9_8,
6_8_8,
6_7_7,
6_6_6,
6_5_6,
6_5_5,
6_4_5,
6_3_4,
6_2_3,
6_1_3,
6_1_2,
5_9_8,
5_8_4,
5_7_0,
5_6_9,
5_5_5,
5_4_1,
5_2_7,
5_2_6,
5_0_5,
4_8_4,
4_8_3,
4_6_2,
4_4_0,
4_3_9,
3_9_6,
3_9_5,
3_5_2,
3_5_1,
3_0_8,
3_0_7,
2_6_4,
2_6_3,
2_2_0,
2_1_9,
1_7_6,
1_3_2,
8_8,
4_4,
0,
]
lowerCAmelCase = [
9_9_9,
9_9_7,
9_9_5,
9_9_2,
9_9_0,
9_8_8,
9_8_6,
9_8_4,
9_8_1,
9_7_9,
9_7_7,
9_7_5,
9_7_2,
9_7_0,
9_6_8,
9_6_6,
9_6_4,
9_6_1,
9_5_9,
9_5_7,
9_5_6,
9_5_4,
9_5_1,
9_4_9,
9_4_6,
9_4_4,
9_4_1,
9_3_9,
9_3_6,
9_3_4,
9_3_1,
9_2_9,
9_2_6,
9_2_4,
9_2_1,
9_1_9,
9_1_6,
9_1_4,
9_1_3,
9_1_0,
9_0_7,
9_0_5,
9_0_2,
8_9_9,
8_9_6,
8_9_3,
8_9_1,
8_8_8,
8_8_5,
8_8_2,
8_7_9,
8_7_7,
8_7_4,
8_7_1,
8_7_0,
8_6_7,
8_6_4,
8_6_1,
8_5_8,
8_5_5,
8_5_2,
8_4_9,
8_4_6,
8_4_3,
8_4_0,
8_3_7,
8_3_4,
8_3_1,
8_2_8,
8_2_7,
8_2_4,
8_2_1,
8_1_7,
8_1_4,
8_1_1,
8_0_8,
8_0_4,
8_0_1,
7_9_8,
7_9_5,
7_9_1,
7_8_8,
7_8_5,
7_8_4,
7_8_0,
7_7_7,
7_7_4,
7_7_0,
7_6_6,
7_6_3,
7_6_0,
7_5_6,
7_5_2,
7_4_9,
7_4_6,
7_4_2,
7_4_1,
7_3_7,
7_3_3,
7_3_0,
7_2_6,
7_2_2,
7_1_8,
7_1_4,
7_1_0,
7_0_7,
7_0_3,
6_9_9,
6_9_8,
6_9_4,
6_9_0,
6_8_5,
6_8_1,
6_7_7,
6_7_3,
6_6_9,
6_6_4,
6_6_0,
6_5_6,
6_5_5,
6_5_0,
6_4_6,
6_4_1,
6_3_6,
6_3_2,
6_2_7,
6_2_2,
6_1_8,
6_1_3,
6_1_2,
6_0_7,
6_0_2,
5_9_6,
5_9_1,
5_8_6,
5_8_0,
5_7_5,
5_7_0,
5_6_9,
5_6_3,
5_5_7,
5_5_1,
5_4_5,
5_3_9,
5_3_3,
5_2_7,
5_2_6,
5_1_9,
5_1_2,
5_0_5,
4_9_8,
4_9_1,
4_8_4,
4_8_3,
4_7_4,
4_6_6,
4_5_7,
4_4_9,
4_4_0,
4_3_9,
4_2_8,
4_1_8,
4_0_7,
3_9_6,
3_9_5,
3_8_1,
3_6_6,
3_5_2,
3_5_1,
3_3_0,
3_0_8,
3_0_7,
2_8_6,
2_6_4,
2_6_3,
2_4_2,
2_2_0,
2_1_9,
1_7_6,
1_7_5,
1_3_2,
1_3_1,
8_8,
4_4,
0,
]
lowerCAmelCase = [
9_9_9,
9_9_1,
9_8_2,
9_7_4,
9_6_6,
9_5_8,
9_5_0,
9_4_1,
9_3_3,
9_2_5,
9_1_6,
9_0_8,
9_0_0,
8_9_9,
8_7_4,
8_5_0,
8_2_5,
8_0_0,
7_9_9,
7_0_0,
6_0_0,
5_0_0,
4_0_0,
3_0_0,
2_0_0,
1_0_0,
0,
]
lowerCAmelCase = [
9_9_9,
9_9_2,
9_8_5,
9_7_8,
9_7_1,
9_6_4,
9_5_7,
9_4_9,
9_4_2,
9_3_5,
9_2_8,
9_2_1,
9_1_4,
9_0_7,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_0_0,
2_9_9,
2_0_0,
1_9_9,
1_0_0,
9_9,
0,
]
lowerCAmelCase = [
9_9_9,
9_9_6,
9_9_2,
9_8_9,
9_8_5,
9_8_2,
9_7_9,
9_7_5,
9_7_2,
9_6_8,
9_6_5,
9_6_1,
9_5_8,
9_5_5,
9_5_1,
9_4_8,
9_4_4,
9_4_1,
9_3_8,
9_3_4,
9_3_1,
9_2_7,
9_2_4,
9_2_0,
9_1_7,
9_1_4,
9_1_0,
9_0_7,
9_0_3,
9_0_0,
8_9_9,
8_9_1,
8_8_4,
8_7_6,
8_6_9,
8_6_1,
8_5_3,
8_4_6,
8_3_8,
8_3_0,
8_2_3,
8_1_5,
8_0_8,
8_0_0,
7_9_9,
7_8_8,
7_7_7,
7_6_6,
7_5_5,
7_4_4,
7_3_3,
7_2_2,
7_1_1,
7_0_0,
6_9_9,
6_8_8,
6_7_7,
6_6_6,
6_5_5,
6_4_4,
6_3_3,
6_2_2,
6_1_1,
6_0_0,
5_9_9,
5_8_5,
5_7_1,
5_5_7,
5_4_2,
5_2_8,
5_1_4,
5_0_0,
4_9_9,
4_8_5,
4_7_1,
4_5_7,
4_4_2,
4_2_8,
4_1_4,
4_0_0,
3_9_9,
3_7_9,
3_5_9,
3_4_0,
3_2_0,
3_0_0,
2_9_9,
2_7_9,
2_5_9,
2_4_0,
2_2_0,
2_0_0,
1_9_9,
1_6_6,
1_3_3,
1_0_0,
9_9,
6_6,
3_3,
0,
]
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}
def load_vocab(vocab_file):
    """Loads a vocabulary file into an ordered token -> index dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
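# The expected vocab.txt format is one token per line, with the line number
# becoming the id: a file starting with "<pad>" then "<unk>" maps them to 0 and 1.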
class WordpieceTokenizer(object):
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end

        return sub_tokens
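# Greedy longest-match-first segmentation: with a vocab containing "abc" and "d",
# tokenize("abcd") yields ["abc", "d"]; spans with no vocab match fall back to
# the unk token one character at a time.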
class CpmAntTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False
    def __init__(self, vocab_file, bod_token="<d>", eod_token="</d>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", unk_token="<unk>", line_token="</n>", space_token="</_>", padding_side="left", **kwargs):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token, eod_token=eod_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, unk_token=unk_token, line_token=line_token, space_token=space_token, padding_side=padding_side, **kwargs
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]

        del self.encoder[space_token]
        del self.encoder[line_token]

        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}

        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)
    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
from __future__ import annotations
import numpy as np
def relu(vector) -> np.ndarray:
    """Apply the ReLU activation element-wise: max(0, x)."""
    return np.maximum(0, vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_2, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS

    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    @property
    def dummy_uncond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet",
        )
        return unet
    @property
    def dummy_cond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet_class_cond",
        )
        return unet
    def get_dummy_components(self, class_cond=False):
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet

        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "batch_size": 1,
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "generator": generator,
            "output_type": "np",
        }

        return inputs
    def test_consistency_model_pipeline_multistep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_multistep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float16, shape=(1, 3, 64, 64)):
        generator = torch.manual_seed(seed)

        inputs = {
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "class_labels": 0,
            "generator": generator,
            "output_type": "np",
        }

        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs["latents"] = latents

        return inputs
    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float16, shape=(1, 3, 64, 64)):
        if type(device) == str:
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents
    def test_consistency_model_cd_multistep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]

        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
    def test_consistency_model_cd_onestep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]

        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
    @require_torch_2
    def test_consistency_model_cd_multistep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)

        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]

        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    @require_torch_2
    def test_consistency_model_cd_onestep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]

        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2**power."""
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0

    for i in list_num:
        sum_of_num += int(i)

    return sum_of_num
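# For example, solution(15) == 26, since 2**15 == 32768 and 3 + 2 + 7 + 6 + 8 == 26.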
if __name__ == "__main__":
    power = int(input("Enter the power of 2: ").strip())
    print("2 ^ ", power, " = ", 2**power)
    result = solution(power)
    print("Sum of the digits is: ", result)
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch."""

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )

    def __post_init__(self):
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
            )
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word masking in Chinese."},
    )
    validation_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Default to the max input length of the model."
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    """Attach a `chinese_ref` column, loaded from `ref_file`, to `dataset` for whole word masking."""
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            datasets["validation"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[:{data_args.validation_split_percentage}%]",
            )
            datasets["train"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[{data_args.validation_split_percentage}%:]",
            )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        if extension == "txt":
            extension = "text"
        datasets = load_dataset(extension, data_files=data_files)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")
    tokenizer_kwargs = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script."
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )
    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedLM.from_config(config)

    model.resize_token_embeddings(len(tokenizer))
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
        column_names = datasets["train"].column_names
    else:
        column_names = datasets["validation"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]
    padding = "max_length" if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Remove empty lines
        examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples["text"], padding=padding, truncation=True, max_length=data_args.max_seq_length)
    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        remove_columns=[text_column_name],
        load_from_cache_file=not data_args.overwrite_cache,
    )
# Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets["validation"] = add_chinese_references(
            tokenized_datasets["validation"], data_args.validation_ref_file
        )
    # If we have ref files, need to avoid it removed by trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False
# Data collator
# This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)
# Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"] if training_args.do_train else None,
        eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
# Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload

        output_train_file = os.path.join(training_args.output_dir, "train_results.txt")
        if trainer.is_world_process_zero():
            with open(output_train_file, "w") as writer:
                logger.info("***** Train results *****")
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        results["perplexity"] = perplexity

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in sorted(results.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file,
    eval_file,
    test_file,
    tokenizer: PreTrainedTokenizer,
    label_column_id,
    max_seq_length: Optional[int] = None,
):
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN.value] = [train_file]

    if eval_file is not None:
        files[datasets.Split.VALIDATION.value] = [eval_file]

    if test_file is not None:
        files[datasets.Split.TEST.value] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
lowerCAmelCase = logging.getLogger(__name__)
@dataclass
class A :
UpperCamelCase_ : int =field(metadata={'''help''': '''Which column contains the label'''} )
UpperCamelCase_ : str =field(default=A_ , metadata={'''help''': '''The path of the training file'''} )
UpperCamelCase_ : Optional[str] =field(default=A_ , metadata={'''help''': '''The path of the development file'''} )
UpperCamelCase_ : Optional[str] =field(default=A_ , metadata={'''help''': '''The path of the test file'''} )
UpperCamelCase_ : int =field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
UpperCamelCase_ : bool =field(
default=A_ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
@dataclass
class A :
UpperCamelCase_ : str =field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
UpperCamelCase_ : Optional[str] =field(
default=A_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
UpperCamelCase_ : Optional[str] =field(
default=A_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
UpperCamelCase_ : bool =field(default=A_ , metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
UpperCamelCase_ : Optional[str] =field(
default=A_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
def _lowerCamelCase( ) -> Optional[Any]:
'''simple docstring'''
__lowercase= HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
__lowercase, __lowercase, __lowercase= parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
            ' --overwrite_output_dir to overwrite it.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
logger.info(
F'n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '
        F'16-bit training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__lowercase= AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
__lowercase, __lowercase, __lowercase, __lowercase= get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=lowercase__ , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
__lowercase= AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(lowercase__ ) , labelaid=lowercase__ , idalabel={id: label for label, id in labelaid.items()} , finetuning_task='text-classification' , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
__lowercase= TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool('.bin' in model_args.model_name_or_path ) , config=lowercase__ , cache_dir=model_args.cache_dir , )
def compute_metrics(lowercase__ ) -> Dict:
__lowercase= np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
__lowercase= TFTrainer(
model=lowercase__ , args=lowercase__ , train_dataset=lowercase__ , eval_dataset=lowercase__ , compute_metrics=lowercase__ , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__lowercase= {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
__lowercase= trainer.evaluate()
__lowercase= os.path.join(training_args.output_dir , 'eval_results.txt' )
with open(lowercase__ , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(F' {key} = {value}' )
writer.write(F'{key} = {value}\n' )
results.update(lowercase__ )
return results
if __name__ == "__main__":
main()
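# A minimal sketch of a CSV layout this script can consume (the column names are
# assumptions for illustration): one or two text columns plus a label column, e.g.
#
#   sentence,label
#   "the movie was great",positive
#   "the plot made no sense",negative
#
# --label_column_id then selects which column holds the labels.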
| 304
| 1
|
import math
lowerCAmelCase = 1_0
lowerCAmelCase = 7
lowerCAmelCase = BALLS_PER_COLOUR * NUM_COLOURS
def _lowerCamelCase( lowercase__ = 2_0 ) -> str:
'''simple docstring'''
__lowercase= math.comb(lowercase__ , lowercase__ )
__lowercase= math.comb(NUM_BALLS - BALLS_PER_COLOUR , lowercase__ )
__lowercase= NUM_COLOURS * (1 - missing_colour / total)
return F'{result:.9f}'
if __name__ == "__main__":
print(solution(2_0))
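# A quick Monte-Carlo cross-check of the closed form above (a sketch added for
# illustration, not part of the original solution). By linearity of expectation,
# the answer is NUM_COLOURS * P(a given colour appears at least once in the draw).
import random

def monte_carlo_estimate(trials: int = 100_000) -> float:
    balls = [colour for colour in range(NUM_COLOURS) for _ in range(BALLS_PER_COLOUR)]
    total_seen = 0
    for _ in range(trials):
        # draw 20 balls without replacement and count the distinct colours
        total_seen += len(set(random.sample(balls, 20)))
    return total_seen / trials  # should approach float(solution(20))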
| 304
|
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A ( A_ ):
def _A (self ):
__lowercase= self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCAmelCase , 'embed_dim' ) )
self.parent.assertTrue(hasattr(lowerCAmelCase , 'num_heads' ) )
class A :
def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=6_4 , lowerCAmelCase=3 , lowerCAmelCase=[1_6, 4_8, 9_6] , lowerCAmelCase=[1, 3, 6] , lowerCAmelCase=[1, 2, 1_0] , lowerCAmelCase=[7, 3, 3] , lowerCAmelCase=[4, 2, 2] , lowerCAmelCase=[2, 1, 1] , lowerCAmelCase=[2, 2, 2] , lowerCAmelCase=[False, False, True] , lowerCAmelCase=[0.0, 0.0, 0.0] , lowerCAmelCase=0.02 , lowerCAmelCase=1E-12 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=2 , ):
__lowercase= parent
__lowercase= batch_size
__lowercase= image_size
__lowercase= patch_sizes
__lowercase= patch_stride
__lowercase= patch_padding
__lowercase= is_training
__lowercase= use_labels
__lowercase= num_labels
__lowercase= num_channels
__lowercase= embed_dim
__lowercase= num_heads
__lowercase= stride_kv
__lowercase= depth
__lowercase= cls_token
__lowercase= attention_drop_rate
__lowercase= initializer_range
__lowercase= layer_norm_eps
def _A (self ):
__lowercase= floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase= None
if self.use_labels:
__lowercase= ids_tensor([self.batch_size] , self.num_labels )
__lowercase= self.get_config()
return config, pixel_values, labels
def _A (self ):
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= CvtModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase )
__lowercase= (self.image_size, self.image_size)
__lowercase, __lowercase= image_size[0], image_size[1]
for i in range(len(self.depth ) ):
__lowercase= floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
__lowercase= floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= self.num_labels
__lowercase= CvtForImageClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _A (self ):
__lowercase= self.prepare_config_and_inputs()
__lowercase, __lowercase, __lowercase= config_and_inputs
__lowercase= {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class A ( A_ , A_ , unittest.TestCase ):
UpperCamelCase_ : Optional[int] =(CvtModel, CvtForImageClassification) if is_torch_available() else ()
UpperCamelCase_ : List[str] =(
{'''feature-extraction''': CvtModel, '''image-classification''': CvtForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase_ : str =False
UpperCamelCase_ : List[Any] =False
UpperCamelCase_ : Any =False
UpperCamelCase_ : Union[str, Any] =False
UpperCamelCase_ : Tuple =False
def _A (self ):
__lowercase= CvtModelTester(self )
__lowercase= ConfigTester(self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase , hidden_size=3_7 )
def _A (self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _A (self ):
return
@unittest.skip(reason='Cvt does not output attentions' )
def _A (self ):
pass
@unittest.skip(reason='Cvt does not use inputs_embeds' )
def _A (self ):
pass
@unittest.skip(reason='Cvt does not support input and output embeddings' )
def _A (self ):
pass
def _A (self ):
__lowercase, __lowercase= self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase= model_class(lowerCAmelCase )
__lowercase= inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase= [*signature.parameters.keys()]
__lowercase= ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def _A (self ):
def check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
with torch.no_grad():
__lowercase= model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
__lowercase= outputs.hidden_states
__lowercase= len(self.model_tester.depth )
self.assertEqual(len(lowerCAmelCase ) , lowerCAmelCase )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
__lowercase, __lowercase= self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase= True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase= True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _A (self ):
pass
@slow
def _A (self ):
for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase= CvtModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
def _lowerCamelCase( ) -> Optional[int]:
'''simple docstring'''
__lowercase= Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class A ( unittest.TestCase ):
@cached_property
def _A (self ):
return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def _A (self ):
__lowercase= CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowerCAmelCase )
__lowercase= self.default_image_processor
__lowercase= prepare_img()
__lowercase= image_processor(images=lowerCAmelCase , return_tensors='pt' ).to(lowerCAmelCase )
# forward pass
with torch.no_grad():
__lowercase= model(**lowerCAmelCase )
# verify the logits
__lowercase= torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase )
__lowercase= torch.tensor([0.92_85, 0.90_15, -0.31_50] ).to(lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1E-4 ) )
| 304
| 1
|
import fire
from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer
def _lowerCamelCase( lowercase__ , lowercase__ , **lowercase__ ) -> Any:
'''simple docstring'''
__lowercase= AutoConfig.from_pretrained(lowercase__ , **lowercase__ )
__lowercase= AutoModelForSeqaSeqLM.from_config(lowercase__ )
model.save_pretrained(lowercase__ )
AutoTokenizer.from_pretrained(lowercase__ ).save_pretrained(lowercase__ )
return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
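# A minimal sketch of invoking this helper via fire (the file name and override
# flag are assumptions for illustration):
#
#   python save_randomly_initialized.py t5-small ./tiny-random-t5 --d_model=64
#
# Extra --key=value flags after the two positional arguments are forwarded to
# AutoConfig.from_pretrained as **config_kwargs.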
| 304
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
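# A short illustration of what the _LazyModule indirection above buys us (added
# for clarity, not part of the original file): the torch-backed classes are only
# imported on first attribute access, e.g.
#
#   import transformers.models.mra as mra  # cheap, no torch-heavy imports yet
#   model_cls = mra.MraModel               # first access triggers the real import
#
# while the TYPE_CHECKING branch keeps the eager imports visible to type checkers.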
| 304
| 1
|
class A ( A_ ):
pass
class A ( A_ ):
pass
class A :
def __init__(self ):
__lowercase= [
[],
[],
[],
]
def _A (self , lowerCAmelCase , lowerCAmelCase ):
try:
if len(self.queues[priority] ) >= 1_0_0:
                raise OverFlowError('Maximum queue size is 100' )
self.queues[priority].append(lowerCAmelCase )
except IndexError:
raise ValueError('Valid priorities are 0, 1, and 2' )
def _A (self ):
for queue in self.queues:
if queue:
return queue.pop(0 )
raise UnderFlowError('All queues are empty' )
def __str__(self ):
return "\n".join(f'Priority {i}: {q}' for i, q in enumerate(self.queues ) )
class A :
def __init__(self ):
__lowercase= []
def _A (self , lowerCAmelCase ):
if len(self.queue ) == 1_0_0:
raise OverFlowError('Maximum queue size is 100' )
self.queue.append(lowerCAmelCase )
def _A (self ):
if not self.queue:
raise UnderFlowError('The queue is empty' )
else:
__lowercase= min(self.queue )
self.queue.remove(lowerCAmelCase )
return data
def __str__(self ):
return str(self.queue )
def _lowerCamelCase( ) -> str:
'''simple docstring'''
__lowercase= FixedPriorityQueue()
fpq.enqueue(0 , 1_0 )
fpq.enqueue(1 , 7_0 )
fpq.enqueue(0 , 1_0_0 )
fpq.enqueue(2 , 1 )
fpq.enqueue(2 , 5 )
fpq.enqueue(1 , 7 )
fpq.enqueue(2 , 4 )
fpq.enqueue(1 , 6_4 )
fpq.enqueue(0 , 1_2_8 )
print(lowercase__ )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(lowercase__ )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def _lowerCamelCase( ) -> str:
'''simple docstring'''
__lowercase= ElementPriorityQueue()
epq.enqueue(1_0 )
epq.enqueue(7_0 )
epq.enqueue(1_0_0 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(6_4 )
epq.enqueue(1_2_8 )
print(lowercase__ )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(lowercase__ )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
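# For comparison, the standard library gives the same "smallest element first"
# behaviour with O(log n) enqueue/dequeue via a binary heap; a minimal sketch
# (added for illustration, not part of the original exercise):
import heapq

class HeapPriorityQueue:
    def __init__(self):
        self._heap: list[int] = []

    def enqueue(self, data: int) -> None:
        heapq.heappush(self._heap, data)  # O(log n) insertion

    def dequeue(self) -> int:
        if not self._heap:
            raise IndexError('The queue is empty')
        return heapq.heappop(self._heap)  # O(log n) removal of the minimum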
| 304
|
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
lowerCAmelCase = '''<<<<<<< This should probably be modified because it mentions: '''
lowerCAmelCase = '''=======
>>>>>>>
'''
lowerCAmelCase = [
'''TextEncoderConfig''',
'''ByteTextEncoder''',
'''SubwordTextEncoder''',
'''encoder_config''',
'''maybe_build_from_corpus''',
'''manual_dir''',
]
lowerCAmelCase = [
# (pattern, replacement)
# Order is important here for some replacements
(R'''tfds\.core''', R'''datasets'''),
(R'''tf\.io\.gfile\.GFile''', R'''open'''),
(R'''tf\.([\w\d]+)''', R'''datasets.Value(\'\1\')'''),
(R'''tfds\.features\.Text\(\)''', R'''datasets.Value(\'string\')'''),
(R'''tfds\.features\.Text\(''', R'''datasets.Value(\'string\'),'''),
(R'''features\s*=\s*tfds.features.FeaturesDict\(''', R'''features=datasets.Features('''),
(R'''tfds\.features\.FeaturesDict\(''', R'''dict('''),
(R'''The TensorFlow Datasets Authors''', R'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''),
(R'''tfds\.''', R'''datasets.'''),
(R'''dl_manager\.manual_dir''', R'''self.config.data_dir'''),
(R'''self\.builder_config''', R'''self.config'''),
]
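# A small worked example (added for clarity, not part of the original command) of
# how the (pattern, replacement) table above rewrites a typical TFDS line:
#
#   line = "features=tfds.features.FeaturesDict({'text': tfds.features.Text()})"
#   for pattern, replacement in TO_CONVERT:
#       line = re.sub(pattern, replacement, line)
#   # -> "features=datasets.Features({'text': datasets.Value('string')})"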
def _lowerCamelCase( lowercase__ ) -> Optional[int]:
'''simple docstring'''
return ConvertCommand(args.tfds_path , args.datasets_directory )
class A ( A_ ):
@staticmethod
def _A (lowerCAmelCase ):
__lowercase= parser.add_parser(
'convert' , help='Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.' , )
train_parser.add_argument(
'--tfds_path' , type=lowerCAmelCase , required=lowerCAmelCase , help='Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.' , )
train_parser.add_argument(
'--datasets_directory' , type=lowerCAmelCase , required=lowerCAmelCase , help='Path to the HuggingFace Datasets folder.' )
train_parser.set_defaults(func=lowerCAmelCase )
def __init__(self , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ):
__lowercase= get_logger('datasets-cli/converting' )
__lowercase= tfds_path
__lowercase= datasets_directory
def _A (self ):
if os.path.isdir(self._tfds_path ):
__lowercase= os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
__lowercase= os.path.dirname(self._tfds_path )
else:
raise ValueError('--tfds_path is neither a directory nor a file. Please check path.' )
__lowercase= os.path.abspath(self._datasets_directory )
self._logger.info(f'Converting datasets from {abs_tfds_path} to {abs_datasets_path}' )
__lowercase= []
__lowercase= []
__lowercase= {}
if os.path.isdir(self._tfds_path ):
__lowercase= os.listdir(lowerCAmelCase )
else:
__lowercase= [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(f'Looking at file {f_name}' )
__lowercase= os.path.join(lowerCAmelCase , lowerCAmelCase )
__lowercase= os.path.join(lowerCAmelCase , lowerCAmelCase )
if not os.path.isfile(lowerCAmelCase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info('Skipping file' )
continue
with open(lowerCAmelCase , encoding='utf-8' ) as f:
__lowercase= f.readlines()
__lowercase= []
__lowercase= False
__lowercase= False
__lowercase= []
for line in lines:
__lowercase= line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
__lowercase= 'import datasets\n'
elif "import tensorflow" in out_line:
# order is important here
__lowercase= ''
continue
elif "from absl import logging" in out_line:
__lowercase= 'from datasets import logging\n'
elif "getLogger" in out_line:
__lowercase= out_line.replace('getLogger' , 'get_logger' )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
__lowercase= True
__lowercase= list(filter(lambda lowerCAmelCase : e in out_line , lowerCAmelCase ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(lowerCAmelCase ) + '\n' )
out_lines.append(lowerCAmelCase )
out_lines.append(lowerCAmelCase )
continue
else:
for pattern, replacement in TO_CONVERT:
__lowercase= re.sub(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
__lowercase= re.match(r'from\stensorflow_datasets.*import\s([^\.\r\n]+)' , lowerCAmelCase )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(',' ) )
__lowercase= 'from . import ' + match.group(1 )
                    # Check that we have not forgotten anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(f'Error converting {out_line.strip()}' )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
__lowercase= True
out_lines.append(lowerCAmelCase )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
__lowercase= f_name.replace('.py' , '' )
__lowercase= os.path.join(lowerCAmelCase , lowerCAmelCase )
__lowercase= os.path.join(lowerCAmelCase , lowerCAmelCase )
os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase )
self._logger.info(f'Adding directory {output_dir}' )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(lowerCAmelCase )
if needs_manual_update:
with_manual_update.append(lowerCAmelCase )
with open(lowerCAmelCase , 'w' , encoding='utf-8' ) as f:
f.writelines(lowerCAmelCase )
self._logger.info(f'Converted in {output_file}' )
for utils_file in utils_files:
try:
__lowercase= os.path.basename(lowerCAmelCase )
__lowercase= imports_to_builder_map[f_name.replace('.py' , '' )]
                self._logger.info(f'Moving {utils_file} to {dest_folder}' )
shutil.copy(lowerCAmelCase , lowerCAmelCase )
except KeyError:
self._logger.error(f'Cannot find destination folder for {utils_file}. Please copy manually.' )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
f'You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.' )
| 304
| 1
|
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
lowerCAmelCase = {
'''169M''': 1_2,
'''430M''': 2_4,
'''1B5''': 2_4,
'''3B''': 3_2,
'''7B''': 3_2,
'''14B''': 4_0,
}
lowerCAmelCase = {
'''169M''': 7_6_8,
'''430M''': 1_0_2_4,
'''1B5''': 2_0_4_8,
'''3B''': 2_5_6_0,
'''7B''': 4_0_9_6,
'''14B''': 5_1_2_0,
}
def _lowerCamelCase( lowercase__ ) -> Tuple:
'''simple docstring'''
__lowercase= list(state_dict.keys() )
for name in state_dict_keys:
__lowercase= state_dict.pop(lowercase__ )
# emb -> embedding
if name.startswith('emb.' ):
__lowercase= name.replace('emb.' , 'embeddings.' )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith('blocks.0.ln0' ):
__lowercase= name.replace('blocks.0.ln0' , 'blocks.0.pre_ln' )
# att -> attention
__lowercase= re.sub(R'blocks\.(\d+)\.att' , R'blocks.\1.attention' , lowercase__ )
# ffn -> feed_forward
__lowercase= re.sub(R'blocks\.(\d+)\.ffn' , R'blocks.\1.feed_forward' , lowercase__ )
# time_mix_k -> time_mix_key and reshape
if name.endswith('.time_mix_k' ):
__lowercase= name.replace('.time_mix_k' , '.time_mix_key' )
# time_mix_v -> time_mix_value and reshape
if name.endswith('.time_mix_v' ):
__lowercase= name.replace('.time_mix_v' , '.time_mix_value' )
        # time_mix_r -> time_mix_receptance and reshape
if name.endswith('.time_mix_r' ):
__lowercase= name.replace('.time_mix_r' , '.time_mix_receptance' )
if name != "head.weight":
__lowercase= 'rwkv.' + name
__lowercase= weight
return state_dict
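# A small worked example of the renaming above (added for illustration): the raw
# checkpoint key 'blocks.0.att.time_mix_k' comes out of convert_state_dict as
# 'rwkv.blocks.0.attention.time_mix_key'.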
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__=None , lowercase__=None , lowercase__=False , lowercase__=None ) -> Union[str, Any]:
'''simple docstring'''
if tokenizer_file is None:
print('No `--tokenizer_file` provided, we will use the default tokenizer.' )
__lowercase= 5_0_2_7_7
__lowercase= AutoTokenizer.from_pretrained('EleutherAI/gpt-neox-20b' )
else:
__lowercase= PreTrainedTokenizerFast(tokenizer_file=lowercase__ )
__lowercase= len(lowercase__ )
tokenizer.save_pretrained(lowercase__ )
# 2. Build the config
__lowercase= list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
__lowercase= candidate
break
if size is None:
raise ValueError('Could not infer the size, please provide it with the `--size` argument.' )
if size not in possible_sizes:
raise ValueError(F'`size` should be one of {possible_sizes}, got {size}.' )
__lowercase= RwkvConfig(
        vocab_size=lowercase__ , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDDEN_SIZE_MAPPING[size] , )
config.save_pretrained(lowercase__ )
# 3. Download model file then convert state_dict
__lowercase= hf_hub_download(lowercase__ , lowercase__ )
__lowercase= torch.load(lowercase__ , map_location='cpu' )
__lowercase= convert_state_dict(lowercase__ )
# 4. Split in shards and save
__lowercase, __lowercase= shard_checkpoint(lowercase__ )
for shard_file, shard in shards.items():
torch.save(lowercase__ , os.path.join(lowercase__ , lowercase__ ) )
if index is not None:
__lowercase= os.path.join(lowercase__ , lowercase__ )
# Save the index as well
with open(lowercase__ , 'w' , encoding='utf-8' ) as f:
__lowercase= json.dumps(lowercase__ , indent=2 , sort_keys=lowercase__ ) + '\n'
f.write(lowercase__ )
    # 5. Clean up shards (for some reason the files PyTorch saves take up the same space as the whole state_dict)
    print(
        'Cleaning up shards. This may fail with an OOM error; if this is the case, don\'t worry, you have still converted the model.' )
__lowercase= list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
__lowercase= torch.load(os.path.join(lowercase__ , lowercase__ ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(lowercase__ , lowercase__ ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError('Please provide a `model_name` to push the model to the Hub.' )
__lowercase= AutoModelForCausalLM.from_pretrained(lowercase__ )
model.push_to_hub(lowercase__ , max_shard_size='2GB' )
tokenizer.push_to_hub(lowercase__ )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--repo_id''', default=None, type=str, required=True, help='''Repo ID from which to pull the checkpoint.'''
)
parser.add_argument(
'''--checkpoint_file''', default=None, type=str, required=True, help='''Name of the checkpoint file in the repo.'''
)
parser.add_argument(
'''--output_dir''', default=None, type=str, required=True, help='''Where to save the converted model.'''
)
parser.add_argument(
'''--tokenizer_file''',
default=None,
type=str,
help='''Path to the tokenizer file to use (if not provided, only the model is converted).''',
)
parser.add_argument(
'''--size''',
default=None,
type=str,
help='''Size of the model. Will be inferred from the `checkpoint_file` if not passed.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Push to the Hub the converted model.''',
)
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help='''Name of the pushed model on the Hub, including the username / organization.''',
)
lowerCAmelCase = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 304
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
lowerCAmelCase = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class A ( A_ ):
UpperCamelCase_ : Optional[int] ='''albert'''
def __init__(self , lowerCAmelCase=3_0_0_0_0 , lowerCAmelCase=1_2_8 , lowerCAmelCase=4_0_9_6 , lowerCAmelCase=1_2 , lowerCAmelCase=1 , lowerCAmelCase=6_4 , lowerCAmelCase=1_6_3_8_4 , lowerCAmelCase=1 , lowerCAmelCase="gelu_new" , lowerCAmelCase=0 , lowerCAmelCase=0 , lowerCAmelCase=5_1_2 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=1E-12 , lowerCAmelCase=0.1 , lowerCAmelCase="absolute" , lowerCAmelCase=0 , lowerCAmelCase=2 , lowerCAmelCase=3 , **lowerCAmelCase , ):
super().__init__(pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , **lowerCAmelCase )
__lowercase= vocab_size
__lowercase= embedding_size
__lowercase= hidden_size
__lowercase= num_hidden_layers
__lowercase= num_hidden_groups
__lowercase= num_attention_heads
__lowercase= inner_group_num
__lowercase= hidden_act
__lowercase= intermediate_size
__lowercase= hidden_dropout_prob
__lowercase= attention_probs_dropout_prob
__lowercase= max_position_embeddings
__lowercase= type_vocab_size
__lowercase= initializer_range
__lowercase= layer_norm_eps
__lowercase= classifier_dropout_prob
__lowercase= position_embedding_type
class A ( A_ ):
@property
def _A (self ):
if self.task == "multiple-choice":
__lowercase= {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__lowercase= {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
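# A minimal illustration (added for clarity, not part of the original file) of the
# mapping the property above yields for the default (non multiple-choice) task:
#
#   OrderedDict([('input_ids',      {0: 'batch', 1: 'sequence'}),
#                ('attention_mask', {0: 'batch', 1: 'sequence'}),
#                ('token_type_ids', {0: 'batch', 1: 'sequence'})])
#
# i.e. the batch and sequence dimensions are exported as dynamic ONNX axes.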
| 304
| 1
|
def _lowerCamelCase( lowercase__ , lowercase__ ) -> Optional[Any]:
'''simple docstring'''
__lowercase= ''
for i in table:
res += inp[i - 1]
return res
def _lowerCamelCase( lowercase__ ) -> Dict:
'''simple docstring'''
return data[1:] + data[0]
def _lowerCamelCase( lowercase__ , lowercase__ ) -> Union[str, Any]:
'''simple docstring'''
__lowercase= ''
for i in range(len(lowercase__ ) ):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
def _lowerCamelCase( lowercase__ , lowercase__ ) -> str:
'''simple docstring'''
__lowercase= int('0b' + data[0] + data[-1] , 2 )
__lowercase= int('0b' + data[1:3] , 2 )
return bin(s[row][col] )[2:]
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> Optional[int]:
'''simple docstring'''
__lowercase= message[:4]
__lowercase= message[4:]
__lowercase= apply_table(lowercase__ , lowercase__ )
__lowercase= xor(lowercase__ , lowercase__ )
__lowercase= apply_sbox(lowercase__ , temp[:4] ) # noqa: E741
__lowercase= apply_sbox(lowercase__ , temp[4:] )
__lowercase= '0' * (2 - len(lowercase__ )) + l # noqa: E741
__lowercase= '0' * (2 - len(lowercase__ )) + r
__lowercase= apply_table(l + r , lowercase__ )
__lowercase= xor(lowercase__ , lowercase__ )
return temp + right
if __name__ == "__main__":
lowerCAmelCase = input('''Enter 10 bit key: ''')
lowerCAmelCase = input('''Enter 8 bit message: ''')
lowerCAmelCase = [6, 3, 7, 4, 8, 5, 1_0, 9]
lowerCAmelCase = [3, 5, 2, 7, 4, 1_0, 1, 9, 8, 6]
lowerCAmelCase = [2, 4, 3, 1]
lowerCAmelCase = [2, 6, 3, 1, 4, 8, 5, 7]
lowerCAmelCase = [4, 1, 3, 5, 7, 2, 8, 6]
lowerCAmelCase = [4, 1, 2, 3, 2, 3, 4, 1]
lowerCAmelCase = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
lowerCAmelCase = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
lowerCAmelCase = apply_table(key, paa_table)
lowerCAmelCase = temp[:5]
lowerCAmelCase = temp[5:]
lowerCAmelCase = left_shift(left)
lowerCAmelCase = left_shift(right)
lowerCAmelCase = apply_table(left + right, pa_table)
lowerCAmelCase = left_shift(left)
lowerCAmelCase = left_shift(right)
lowerCAmelCase = left_shift(left)
lowerCAmelCase = left_shift(right)
lowerCAmelCase = apply_table(left + right, pa_table)
# encryption
lowerCAmelCase = apply_table(message, IP)
lowerCAmelCase = function(expansion, sa, sa, keya, temp)
lowerCAmelCase = temp[4:] + temp[:4]
lowerCAmelCase = function(expansion, sa, sa, keya, temp)
lowerCAmelCase = apply_table(temp, IP_inv)
print('''Cipher text is:''', CT)
# decryption
lowerCAmelCase = apply_table(CT, IP)
lowerCAmelCase = function(expansion, sa, sa, keya, temp)
lowerCAmelCase = temp[4:] + temp[:4]
lowerCAmelCase = function(expansion, sa, sa, keya, temp)
lowerCAmelCase = apply_table(temp, IP_inv)
    print('''Plain text after decrypting is:''', PT)
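# Note on the decryption pass above (comment added for clarity): S-DES decryption
# reuses the same two-round Feistel structure as encryption, but applies the round
# keys in the opposite order (K2 in the first round, K1 in the second), with the
# usual half-swap between the rounds.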
| 304
|
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def _lowerCamelCase( lowercase__ , lowercase__ ) -> Optional[int]:
'''simple docstring'''
__lowercase= 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
__lowercase= Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw ).convert('RGB' )
__lowercase= transforms.Compose(
[
transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
transforms.ToTensor(),
transforms.Normalize((0.4814_5466, 0.457_8275, 0.4082_1073) , (0.2686_2954, 0.2613_0258, 0.2757_7711) ),
] )
__lowercase= transform(lowercase__ ).unsqueeze(0 ).to(lowercase__ )
return image
def _lowerCamelCase( lowercase__ ) -> Dict:
'''simple docstring'''
if "visual_encoder" in key:
__lowercase= re.sub('visual_encoder*' , 'vision_model.encoder' , lowercase__ )
if "blocks" in key:
__lowercase= re.sub(R'blocks' , 'layers' , lowercase__ )
if "attn" in key:
__lowercase= re.sub(R'attn' , 'self_attn' , lowercase__ )
if "norm1" in key:
__lowercase= re.sub(R'norm1' , 'layer_norm1' , lowercase__ )
if "norm2" in key:
__lowercase= re.sub(R'norm2' , 'layer_norm2' , lowercase__ )
if "encoder.norm" in key:
__lowercase= re.sub(R'encoder.norm' , 'post_layernorm' , lowercase__ )
if "encoder.patch_embed.proj" in key:
__lowercase= re.sub(R'encoder.patch_embed.proj' , 'embeddings.patch_embedding' , lowercase__ )
if "encoder.pos_embed" in key:
__lowercase= re.sub(R'encoder.pos_embed' , 'embeddings.position_embedding' , lowercase__ )
if "encoder.cls_token" in key:
__lowercase= re.sub(R'encoder.cls_token' , 'embeddings.class_embedding' , lowercase__ )
if "self_attn" in key:
__lowercase= re.sub(R'self_attn.proj' , 'self_attn.projection' , lowercase__ )
return key
@torch.no_grad()
def _lowerCamelCase( lowercase__ , lowercase__=None ) -> int:
'''simple docstring'''
if config_path is not None:
__lowercase= BlipConfig.from_pretrained(lowercase__ )
else:
__lowercase= BlipConfig(projection_dim=5_1_2 , text_config={} , vision_config={} )
__lowercase= BlipForConditionalGeneration(lowercase__ ).eval()
__lowercase= 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'
__lowercase= blip_decoder(pretrained=lowercase__ , image_size=3_8_4 , vit='base' )
__lowercase= pt_model.eval()
__lowercase= pt_model.state_dict()
for key in modified_state_dict.copy():
__lowercase= modified_state_dict.pop(lowercase__ )
__lowercase= rename_key(lowercase__ )
__lowercase= value
hf_model.load_state_dict(lowercase__ )
__lowercase= 3_8_4
__lowercase= load_demo_image(image_size=lowercase__ , device='cpu' )
__lowercase= BertTokenizer.from_pretrained('bert-base-uncased' )
__lowercase= tokenizer(['a picture of'] ).input_ids
__lowercase= hf_model.generate(lowercase__ , lowercase__ )
assert out[0].tolist() == [3_0_5_2_2, 1_0_3_7, 3_8_6_1, 1_9_9_7, 1_0_3_7, 2_4_5_0, 3_5_6_4, 2_0_0_6, 1_9_9_6, 3_5_0_9, 2_0_0_7, 2_0_1_4, 3_8_9_9, 1_0_2]
__lowercase= hf_model.generate(lowercase__ )
assert out[0].tolist() == [3_0_5_2_2, 1_0_3_7, 2_4_5_0, 3_5_6_4, 2_0_0_6, 1_9_9_6, 3_5_0_9, 2_0_0_7, 2_0_1_4, 3_8_9_9, 1_0_2]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(lowercase__ )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
__lowercase= (
'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'
)
__lowercase= blip_vqa(pretrained=lowercase__ , image_size=lowercase__ , vit='base' )
vqa_model.eval()
__lowercase= vqa_model.state_dict()
for key in modified_state_dict.copy():
__lowercase= modified_state_dict.pop(lowercase__ )
__lowercase= rename_key(lowercase__ )
__lowercase= value
__lowercase= BlipForQuestionAnswering(lowercase__ )
hf_vqa_model.load_state_dict(lowercase__ )
__lowercase= ['How many dogs are in this image?']
__lowercase= tokenizer(lowercase__ , return_tensors='pt' ).input_ids
__lowercase= hf_vqa_model.generate(lowercase__ , lowercase__ )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '_vqa' )
__lowercase= 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'
__lowercase= blip_itm(pretrained=lowercase__ , image_size=lowercase__ , vit='base' )
itm_model.eval()
__lowercase= itm_model.state_dict()
for key in modified_state_dict.copy():
__lowercase= modified_state_dict.pop(lowercase__ )
__lowercase= rename_key(lowercase__ )
__lowercase= value
__lowercase= BlipForImageTextRetrieval(lowercase__ )
__lowercase= ['A picture of a woman with a dog sitting in a beach']
__lowercase= tokenizer(
lowercase__ , return_tensors='pt' , padding='max_length' , truncation=lowercase__ , max_length=3_5 , ).input_ids
hf_itm_model.load_state_dict(lowercase__ )
hf_itm_model.eval()
__lowercase= hf_itm_model(lowercase__ , lowercase__ , use_itm_head=lowercase__ )
__lowercase= hf_itm_model(lowercase__ , lowercase__ , use_itm_head=lowercase__ )
assert out[0].item() == 0.2110_6874_9427_7954
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_5698_8453_8650_5127
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + '_itm' )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
lowerCAmelCase = parser.parse_args()
    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
| 304
| 1
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
lowerCAmelCase = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''')
@dataclass
class A :
UpperCamelCase_ : Optional[str] =field(
default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} )
UpperCamelCase_ : Optional[str] =field(
default=A_ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
UpperCamelCase_ : Optional[str] =field(
default=A_ , metadata={'''help''': '''The column name of the images in the files.'''} )
UpperCamelCase_ : Optional[str] =field(default=A_ , metadata={'''help''': '''A folder containing the training data.'''} )
UpperCamelCase_ : Optional[str] =field(default=A_ , metadata={'''help''': '''A folder containing the validation data.'''} )
UpperCamelCase_ : Optional[float] =field(
default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} )
UpperCamelCase_ : Optional[int] =field(
default=A_ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
UpperCamelCase_ : Optional[int] =field(
default=A_ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
def _A (self ):
__lowercase= {}
if self.train_dir is not None:
__lowercase= self.train_dir
if self.validation_dir is not None:
__lowercase= self.validation_dir
__lowercase= data_files if data_files else None
@dataclass
class A :
UpperCamelCase_ : str =field(
default=A_ , metadata={
'''help''': (
            '''The model checkpoint for weights initialization. Don\'t set if you want to train a model from scratch.'''
)
} , )
UpperCamelCase_ : Optional[str] =field(
default=A_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name_or_path'''} )
UpperCamelCase_ : Optional[str] =field(
default=A_ , metadata={
'''help''': (
'''Override some existing default config settings when a model is trained from scratch. Example: '''
'''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
)
} , )
UpperCamelCase_ : Optional[str] =field(
default=A_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from s3'''} )
UpperCamelCase_ : str =field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
UpperCamelCase_ : str =field(default=A_ , metadata={'''help''': '''Name or path of preprocessor config.'''} )
UpperCamelCase_ : bool =field(
default=A_ , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
UpperCamelCase_ : float =field(
default=0.75 , metadata={'''help''': '''The ratio of the number of masked tokens in the input sequence.'''} )
UpperCamelCase_ : bool =field(
default=A_ , metadata={'''help''': '''Whether or not to train with normalized pixel values as target.'''} )
@dataclass
class A ( A_ ):
UpperCamelCase_ : float =field(
default=1e-3 , metadata={'''help''': '''Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'''} )
def _lowerCamelCase( lowercase__ ) -> Union[str, Any]:
'''simple docstring'''
__lowercase= torch.stack([example['pixel_values'] for example in examples] )
return {"pixel_values": pixel_values}
def _lowerCamelCase( ) -> str:
'''simple docstring'''
__lowercase= HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__lowercase, __lowercase, __lowercase= parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__lowercase, __lowercase, __lowercase= parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_mae' , lowercase__ , lowercase__ )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__lowercase= training_args.get_process_log_level()
logger.setLevel(lowercase__ )
transformers.utils.logging.set_verbosity(lowercase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
        F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '
        + F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bit training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
__lowercase= None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__lowercase= get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
                'Use --overwrite_output_dir to overwrite it.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Initialize our dataset.
__lowercase= load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
__lowercase= None if 'validation' in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , lowercase__ ) and data_args.train_val_split > 0.0:
__lowercase= ds['train'].train_test_split(data_args.train_val_split )
__lowercase= split['train']
__lowercase= split['test']
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__lowercase= {
'cache_dir': model_args.cache_dir,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.config_name:
__lowercase= ViTMAEConfig.from_pretrained(model_args.config_name , **lowercase__ )
elif model_args.model_name_or_path:
__lowercase= ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **lowercase__ )
else:
__lowercase= ViTMAEConfig()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.config_overrides is not None:
logger.info(F'Overriding config: {model_args.config_overrides}' )
config.update_from_string(model_args.config_overrides )
logger.info(F'New config: {config}' )
# adapt config
config.update(
{
'mask_ratio': model_args.mask_ratio,
'norm_pix_loss': model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
__lowercase= ViTImageProcessor.from_pretrained(model_args.image_processor_name , **lowercase__ )
elif model_args.model_name_or_path:
__lowercase= ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **lowercase__ )
else:
__lowercase= ViTImageProcessor()
# create model
if model_args.model_name_or_path:
__lowercase= ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=lowercase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('Training new model from scratch' )
__lowercase= ViTMAEForPreTraining(lowercase__ )
if training_args.do_train:
__lowercase= ds['train'].column_names
else:
__lowercase= ds['validation'].column_names
if data_args.image_column_name is not None:
__lowercase= data_args.image_column_name
elif "image" in column_names:
__lowercase= 'image'
elif "img" in column_names:
__lowercase= 'img'
else:
__lowercase= column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
__lowercase= image_processor.size['shortest_edge']
else:
__lowercase= (image_processor.size['height'], image_processor.size['width'])
__lowercase= Compose(
[
Lambda(lambda lowercase__ : img.convert('RGB' ) if img.mode != "RGB" else img ),
RandomResizedCrop(lowercase__ , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(lowercase__ ):
__lowercase= [transforms(lowercase__ ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('--do_train requires a train dataset' )
if data_args.max_train_samples is not None:
__lowercase= ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(lowercase__ )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('--do_eval requires a validation dataset' )
if data_args.max_eval_samples is not None:
__lowercase= (
ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(lowercase__ )
# Compute absolute learning rate
__lowercase= (
training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
)
if training_args.base_learning_rate is not None:
__lowercase= training_args.base_learning_rate * total_train_batch_size / 2_5_6
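    # e.g. with per-device batch size 32, gradient accumulation 4 and world size 2,
    # total_train_batch_size is 256 and the absolute LR equals the base LR exactly.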
# Initialize our trainer
__lowercase= Trainer(
model=lowercase__ , args=lowercase__ , train_dataset=ds['train'] if training_args.do_train else None , eval_dataset=ds['validation'] if training_args.do_eval else None , tokenizer=lowercase__ , data_collator=lowercase__ , )
# Training
if training_args.do_train:
__lowercase= None
if training_args.resume_from_checkpoint is not None:
__lowercase= training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__lowercase= last_checkpoint
__lowercase= trainer.train(resume_from_checkpoint=lowercase__ )
trainer.save_model()
trainer.log_metrics('train' , train_result.metrics )
trainer.save_metrics('train' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
__lowercase= trainer.evaluate()
trainer.log_metrics('eval' , lowercase__ )
trainer.save_metrics('eval' , lowercase__ )
# Write model card and (optionally) push to hub
__lowercase= {
'tasks': 'masked-auto-encoding',
'dataset': data_args.dataset_name,
'tags': ['masked-auto-encoding'],
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowercase__ )
else:
trainer.create_model_card(**lowercase__ )
def _lowerCamelCase( lowercase__ ) -> Dict:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 304
|
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
lowerCAmelCase = (3, 9, -1_1, 0, 7, 5, 1, -1)
lowerCAmelCase = (4, 6, 2, 0, 8, 1_0, 3, -2)
@dataclass
class A :
UpperCamelCase_ : int
UpperCamelCase_ : Node | None
class A :
def __init__(self , lowerCAmelCase ):
__lowercase= None
for i in sorted(lowerCAmelCase , reverse=lowerCAmelCase ):
__lowercase= Node(lowerCAmelCase , self.head )
def __iter__(self ):
__lowercase= self.head
while node:
yield node.data
__lowercase= node.next_node
def __len__(self ):
return sum(1 for _ in self )
def __str__(self ):
return " -> ".join([str(lowerCAmelCase ) for node in self] )
def _lowerCamelCase( lowercase__ , lowercase__ ) -> SortedLinkedList:
'''simple docstring'''
return SortedLinkedList(list(lowercase__ ) + list(lowercase__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
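# Note (added for clarity): merge_lists concatenates both inputs and lets the
# constructor re-sort them, i.e. O((m + n) log(m + n)). Since both lists are
# already sorted, heapq.merge(sll_one, sll_two) would produce the same ascending
# order in O(m + n) time.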
| 304
| 1
|
from collections import deque
class Process:
    def __init__(self, process_name, arrival_time, burst_time):
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time the process waits in the ready queue
        self.turnaround_time = 0  # time from arrival time to completion time

class MLFQ:
    def __init__(self, number_of_queues, time_slices, queue, current_time):
        # total number of mlfq's queues
        self.number_of_queues = number_of_queues
        # time slices of the queues that the round robin algorithm is applied to
        self.time_slices = time_slices
        # unfinished processes are in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished processes are in this sequence queue
        self.finish_queue = deque()

    def calculate_sequence_of_finish_queue(self):
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue):
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue):
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue):
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue):
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process):
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time

    def first_come_first_served(self, ready_queue):
        finished = deque()  # sequence deque of finished processes
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process
            # if the process's arrival time is later than the current time,
            # advance the clock to the arrival time
            if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst time to 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to the finished queue
            finished.append(cp)
        self.finish_queue.extend(finished)  # add finished processes to finish queue
        # FCFS will finish all remaining processes
        return finished

    def round_robin(self, ready_queue, time_slice):
        finished = deque()  # sequence deque of terminated processes
        # just for one cycle; unfinished processes go back to the queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process
            # if the process's arrival time is later than the current time,
            # advance the clock to the arrival time
            if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of the process is bigger than the time slice
            if cp.burst_time > time_slice:
                # use the CPU for only one time slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # put the process at the back of the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use the CPU for the remaining burst time
                self.current_time += cp.burst_time
                # set burst time to 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process's turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to the finished queue
                finished.append(cp)
        self.finish_queue.extend(finished)  # add finished processes to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue

    def multi_level_feedback_queue(self):
        # all queues except the last one use the round robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(self.ready_queue, self.time_slices[i])
        # the last queue uses the first come, first served algorithm
        self.first_come_first_served(self.ready_queue)
        return self.finish_queue
if __name__ == "__main__":
import doctest
lowerCAmelCase = Process('''P1''', 0, 5_3)
lowerCAmelCase = Process('''P2''', 0, 1_7)
lowerCAmelCase = Process('''P3''', 0, 6_8)
lowerCAmelCase = Process('''P4''', 0, 2_4)
lowerCAmelCase = 3
lowerCAmelCase = [1_7, 2_5]
lowerCAmelCase = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={'''queue''': deque([Pa, Pa, Pa, Pa])})
lowerCAmelCase = Process('''P1''', 0, 5_3)
lowerCAmelCase = Process('''P2''', 0, 1_7)
lowerCAmelCase = Process('''P3''', 0, 6_8)
lowerCAmelCase = Process('''P4''', 0, 2_4)
lowerCAmelCase = 3
lowerCAmelCase = [1_7, 2_5]
lowerCAmelCase = deque([Pa, Pa, Pa, Pa])
lowerCAmelCase = MLFQ(number_of_queues, time_slices, queue, 0)
lowerCAmelCase = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
F'waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print completion times of processes(P1, P2, P3, P4)
print(
F'completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
F'turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print sequence of finished processes
print(
F'sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}'
)
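# Sanity-check sketch of the round-robin bookkeeping above, on a toy case small
# enough to verify by hand (illustrative values, not the P1..P4 workload): with
# quantum 2 and two processes of burst 3 arriving at time 0, the timeline is
# A 0-2, B 2-4, A 4-5, B 5-6, so the waiting times come out to 2 and 3.
def rr_waiting_times(bursts, quantum):
    ready = deque((name, burst) for name, burst in enumerate(bursts))
    time, completion = 0, {}
    while ready:
        name, remaining = ready.popleft()
        run = min(quantum, remaining)
        time += run
        if remaining > run:
            ready.append((name, remaining - run))
        else:
            completion[name] = time
    # waiting time = completion - arrival (0 here) - burst
    return [completion[i] - bursts[i] for i in range(len(bursts))]

assert rr_waiting_times([3, 3], 2) == [2, 3]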
| 304
|
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(fnc, x_start, x_end, steps=1_0_0) -> float:
    '''Approximate the (unsigned) area between fnc, the x-axis, x = x_start and x = x_end.'''
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximate each small segment of the curve as linear and use the
        # trapezoidal rule for its area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area

if __name__ == "__main__":

    def f(x) -> float:
        '''The curve under test.'''
        return x**3 + x**2

    print('''f(x) = x^3 + x^2''')
    print('''The area between the curve, x = -5, x = 5 and the x axis is:''')
    i = 1_0
    while i <= 1_0_0_0_0_0:
        print(F'with {i} steps: {trapezoidal_area(f, -5, 5, i)}')
        i *= 1_0
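# Convergence sanity check (a sketch): because of the abs() in the accumulation,
# trapezoidal_area approximates the *unsigned* area between the curve and the
# x-axis. Here f(x) = x^3 + x^2 = x^2 (x + 1) is negative on (-5, -1), and
# integrating |f| piecewise gives 344/3 + 198 = 938/3 ~ 312.667.
if __name__ == "__main__":
    exact_unsigned_area = 938 / 3
    assert abs(trapezoidal_area(f, -5, 5, 10_000) - exact_unsigned_area) < 1e-2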
| 304
| 1
|
from ...processing_utils import ProcessorMixin
class A ( A_ ):
UpperCamelCase_ : str ='''SpeechT5FeatureExtractor'''
UpperCamelCase_ : int ='''SpeechT5Tokenizer'''
def __init__(self , lowerCAmelCase , lowerCAmelCase ):
super().__init__(lowerCAmelCase , lowerCAmelCase )
def __call__(self , *lowerCAmelCase , **lowerCAmelCase ):
__lowercase= kwargs.pop('audio' , lowerCAmelCase )
__lowercase= kwargs.pop('text' , lowerCAmelCase )
__lowercase= kwargs.pop('text_target' , lowerCAmelCase )
__lowercase= kwargs.pop('audio_target' , lowerCAmelCase )
__lowercase= kwargs.pop('sampling_rate' , lowerCAmelCase )
if audio is not None and text is not None:
raise ValueError(
'Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?' )
if audio_target is not None and text_target is not None:
raise ValueError(
'Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?' )
if audio is None and audio_target is None and text is None and text_target is None:
raise ValueError(
'You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.' )
if audio is not None:
__lowercase= self.feature_extractor(lowerCAmelCase , *lowerCAmelCase , sampling_rate=lowerCAmelCase , **lowerCAmelCase )
elif text is not None:
__lowercase= self.tokenizer(lowerCAmelCase , **lowerCAmelCase )
else:
__lowercase= None
if audio_target is not None:
__lowercase= self.feature_extractor(audio_target=lowerCAmelCase , *lowerCAmelCase , sampling_rate=lowerCAmelCase , **lowerCAmelCase )
__lowercase= targets['input_values']
elif text_target is not None:
__lowercase= self.tokenizer(lowerCAmelCase , **lowerCAmelCase )
__lowercase= targets['input_ids']
else:
__lowercase= None
if inputs is None:
return targets
if targets is not None:
__lowercase= labels
__lowercase= targets.get('attention_mask' )
if decoder_attention_mask is not None:
__lowercase= decoder_attention_mask
return inputs
def _A (self , *lowerCAmelCase , **lowerCAmelCase ):
__lowercase= kwargs.pop('input_values' , lowerCAmelCase )
__lowercase= kwargs.pop('input_ids' , lowerCAmelCase )
__lowercase= kwargs.pop('labels' , lowerCAmelCase )
if input_values is not None and input_ids is not None:
raise ValueError('Cannot process both `input_values` and `input_ids` inputs.' )
if input_values is None and input_ids is None and labels is None:
raise ValueError(
'You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.' )
if input_values is not None:
__lowercase= self.feature_extractor.pad(lowerCAmelCase , *lowerCAmelCase , **lowerCAmelCase )
elif input_ids is not None:
__lowercase= self.tokenizer.pad(lowerCAmelCase , **lowerCAmelCase )
else:
__lowercase= None
if labels is not None:
if "input_ids" in labels or (isinstance(lowerCAmelCase , lowerCAmelCase ) and "input_ids" in labels[0]):
__lowercase= self.tokenizer.pad(lowerCAmelCase , **lowerCAmelCase )
__lowercase= targets['input_ids']
else:
__lowercase= self.feature_extractor.feature_size
__lowercase= self.feature_extractor.num_mel_bins
__lowercase= self.feature_extractor.pad(lowerCAmelCase , *lowerCAmelCase , **lowerCAmelCase )
__lowercase= feature_size_hack
__lowercase= targets['input_values']
else:
__lowercase= None
if inputs is None:
return targets
if targets is not None:
__lowercase= labels
__lowercase= targets.get('attention_mask' )
if decoder_attention_mask is not None:
__lowercase= decoder_attention_mask
return inputs
def _A (self , *lowerCAmelCase , **lowerCAmelCase ):
return self.tokenizer.batch_decode(*lowerCAmelCase , **lowerCAmelCase )
def _A (self , *lowerCAmelCase , **lowerCAmelCase ):
return self.tokenizer.decode(*lowerCAmelCase , **lowerCAmelCase )
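# Illustrative usage sketch, not part of the processor above: a single processor
# call routes `text` to the tokenizer and `audio` to the feature extractor. The
# checkpoint name below is the public microsoft/speecht5_tts checkpoint; running
# this downloads weights, so it is guarded and kept out of import time.
if __name__ == "__main__":
    from transformers import SpeechT5Processor

    processor = SpeechT5Processor.from_pretrained('microsoft/speecht5_tts')
    inputs = processor(text='Hello, world!', return_tensors='pt')
    print(inputs['input_ids'].shape)  # (1, sequence_length)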
| 304
|
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class A :
def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=2 , lowerCAmelCase=9_9 , lowerCAmelCase=0 , lowerCAmelCase=3_2 , lowerCAmelCase=5 , lowerCAmelCase=4 , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_1_2 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=2 , lowerCAmelCase=4 , lowerCAmelCase="last" , lowerCAmelCase=True , lowerCAmelCase=None , lowerCAmelCase=0 , ):
__lowercase= parent
__lowercase= batch_size
__lowercase= seq_length
__lowercase= is_training
__lowercase= use_input_lengths
__lowercase= use_token_type_ids
__lowercase= use_labels
__lowercase= gelu_activation
__lowercase= sinusoidal_embeddings
__lowercase= causal
__lowercase= asm
__lowercase= n_langs
__lowercase= vocab_size
__lowercase= n_special
__lowercase= hidden_size
__lowercase= num_hidden_layers
__lowercase= num_attention_heads
__lowercase= hidden_dropout_prob
__lowercase= attention_probs_dropout_prob
__lowercase= max_position_embeddings
__lowercase= type_sequence_label_size
__lowercase= initializer_range
__lowercase= num_labels
__lowercase= num_choices
__lowercase= summary_type
__lowercase= use_proj
__lowercase= scope
__lowercase= bos_token_id
def _A (self ):
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase= random_attention_mask([self.batch_size, self.seq_length] )
__lowercase= None
if self.use_input_lengths:
__lowercase= (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__lowercase= None
if self.use_token_type_ids:
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__lowercase= None
__lowercase= None
__lowercase= None
if self.use_labels:
__lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase= ids_tensor([self.batch_size] , 2 ).float()
__lowercase= ids_tensor([self.batch_size] , self.num_choices )
__lowercase= self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _A (self ):
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , lengths=lowerCAmelCase , langs=lowerCAmelCase )
__lowercase= model(lowerCAmelCase , langs=lowerCAmelCase )
__lowercase= model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMWithLMHeadModel(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMForQuestionAnsweringSimple(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase )
__lowercase= model(lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase )
__lowercase= outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMForQuestionAnswering(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase )
__lowercase= model(
lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase , cls_index=lowerCAmelCase , is_impossible=lowerCAmelCase , p_mask=lowerCAmelCase , )
__lowercase= model(
lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase , cls_index=lowerCAmelCase , is_impossible=lowerCAmelCase , )
((__lowercase), )= result_with_labels.to_tuple()
__lowercase= model(lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase )
((__lowercase), )= result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMForSequenceClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase )
__lowercase= model(lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= self.num_labels
__lowercase= XLMForTokenClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= self.num_choices
__lowercase= XLMForMultipleChoice(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase= token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase= input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase= model(
lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _A (self ):
__lowercase= self.prepare_config_and_inputs()
(
(
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
),
)= config_and_inputs
__lowercase= {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class A ( A_ , A_ , A_ , unittest.TestCase ):
UpperCamelCase_ : int =(
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
UpperCamelCase_ : Dict =(
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
UpperCamelCase_ : str =(
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False ):
__lowercase= super()._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
__lowercase= torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase )
__lowercase= torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase )
return inputs_dict
def _A (self ):
__lowercase= XLMModelTester(self )
__lowercase= ConfigTester(self , config_class=lowerCAmelCase , emb_dim=3_7 )
def _A (self ):
self.config_tester.run_common_tests()
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False , lowerCAmelCase=1 ):
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
self.assertListEqual(
[isinstance(lowerCAmelCase , lowerCAmelCase ) for iter_attentions in attentions] , [True] * len(lowerCAmelCase ) )
self.assertEqual(len(lowerCAmelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(lowerCAmelCase ):
# adds PAD dummy token
__lowercase= min_length + idx + 1
__lowercase= min_length + idx + 1
__lowercase= (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(lowerCAmelCase ) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False , lowerCAmelCase=1 ):
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
self.assertListEqual(
[isinstance(lowerCAmelCase , lowerCAmelCase ) for iter_hidden_states in hidden_states] , [True] * len(lowerCAmelCase ) , )
self.assertEqual(len(lowerCAmelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(lowerCAmelCase ):
# adds PAD dummy token
__lowercase= min_length + idx + 1
__lowercase= (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(lowerCAmelCase ) , )
pass
@slow
def _A (self ):
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase= XLMModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
@require_torch
class A ( unittest.TestCase ):
@slow
def _A (self ):
__lowercase= XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' )
model.to(lowerCAmelCase )
__lowercase= torch.tensor([[1_4, 4_4_7]] , dtype=torch.long , device=lowerCAmelCase ) # the president
__lowercase= [
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
__lowercase= model.generate(lowerCAmelCase , do_sample=lowerCAmelCase )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , lowerCAmelCase )
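# Minimal sketch of the ids_tensor helper the tester above relies on: a random
# LongTensor of token ids drawn uniformly from [0, vocab_size). This is a
# simplified stand-in for illustration, not the implementation in
# test_modeling_common.
def ids_tensor_sketch(shape, vocab_size):
    return torch.randint(0, vocab_size, tuple(shape), dtype=torch.long)
# e.g. ids_tensor_sketch([13, 7], 99) has shape (13, 7) and values in 0..98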
| 304
| 1
|
from __future__ import annotations
import numpy as np
def relu(vector):
    '''Apply the ReLU activation element-wise: relu(x) = max(0, x).'''
    return np.maximum(0, vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
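# Companion sketch: the subgradient of ReLU used in backpropagation, taking the
# derivative to be 0 at x = 0 (a common convention; any value in [0, 1] is a
# valid subgradient there).
def relu_grad(vector):
    return (np.asarray(vector) > 0).astype(float)

assert relu_grad([-1, 0, 5]).tolist() == [0.0, 0.0, 1.0]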
| 304
|
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
headers = {'''User-Agent''': UserAgent().random}

def extract_user_profile(script) -> dict:
    '''Pull the embedded profile JSON out of one of the page's <script> tags.'''
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser:
    def __init__(self, username):
        self.url = f'https://www.instagram.com/{username}/'
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        '''Fetch the profile page and return the embedded user data.'''
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, 'html.parser').find_all('script')
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])

    def __repr__(self):
        return f"{self.__class__.__name__}('{self.username}')"

    def __str__(self):
        return f'{self.fullname} ({self.username}) is {self.biography}'

    @property
    def username(self):
        return self.user_data["username"]

    @property
    def fullname(self):
        return self.user_data["full_name"]

    @property
    def biography(self):
        return self.user_data["biography"]

    @property
    def email(self):
        return self.user_data["business_email"]

    @property
    def website(self):
        return self.user_data["external_url"]

    @property
    def number_of_followers(self):
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self):
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self):
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self):
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self):
        return self.user_data["is_verified"]

    @property
    def is_private(self):
        return self.user_data["is_private"]
def test_instagram_user(username: str = "github") -> None:
    '''Spot-check the scraper against the public GitHub account.'''
    import os
    if os.environ.get('CI'):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
    assert instagram_user.username == username
    if username != "github":
        return
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 1_5_0
    assert instagram_user.number_of_followers > 1_2_0_0_0_0
    assert instagram_user.number_of_followings > 1_5
    assert instagram_user.email == "support@github.com"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith('https://instagram.')
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
instagram_user = InstagramUser('''github''')
print(instagram_user)
print(F'{instagram_user.number_of_posts = }')
print(F'{instagram_user.number_of_followers = }')
print(F'{instagram_user.number_of_followings = }')
print(F'{instagram_user.email = }')
print(F'{instagram_user.website = }')
print(F'{instagram_user.profile_picture_url = }')
print(F'{instagram_user.is_verified = }')
print(F'{instagram_user.is_private = }')
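# Offline sketch of the extraction trick used above: the profile data is embedded
# as a JSON object inside a <script> tag, so the code slices from the first
# '{"config"' to the trailing semicolon and parses the remainder. The payload
# below is synthetic; the real page layout may differ.
_fake_script = 'window._sharedData = {"config": {"csrf_token": "x"}, "entry_data": {}};'
_payload = json.loads(_fake_script[_fake_script.find('{"config"') : -1])
assert _payload["entry_data"] == {}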
| 304
| 1
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class A ( unittest.TestCase ):
UpperCamelCase_ : Optional[Any] =ViTImageProcessor if is_vision_available() else None
@property
def _A (self ):
return self.image_processor_tester.prepare_image_processor_dict()
def _A (self ):
__lowercase= (3, 3_2, 1_2_8)
__lowercase= tempfile.mkdtemp()
# fmt: off
__lowercase= ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
# fmt: on
__lowercase= dict(zip(lowerCAmelCase , range(len(lowerCAmelCase ) ) ) )
__lowercase= os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(lowerCAmelCase ) + '\n' )
__lowercase= {
'do_normalize': False,
'do_resize': True,
'image_processor_type': 'ViTImageProcessor',
'resample': 3,
'size': {'height': 3_2, 'width': 1_2_8},
}
__lowercase= os.path.join(self.tmpdirname , lowerCAmelCase )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(lowerCAmelCase , lowerCAmelCase )
def _A (self , **lowerCAmelCase ):
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase )
def _A (self , **lowerCAmelCase ):
return ViTImageProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase )
def _A (self ):
shutil.rmtree(self.tmpdirname )
def _A (self ):
__lowercase= np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )
__lowercase= Image.fromarray(np.moveaxis(lowerCAmelCase , 0 , -1 ) )
return image_input
def _A (self ):
__lowercase= self.get_tokenizer()
__lowercase= self.get_image_processor()
__lowercase= MgpstrProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
__lowercase= MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=lowerCAmelCase )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , lowerCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCAmelCase )
def _A (self ):
__lowercase= self.get_tokenizer()
__lowercase= self.get_image_processor()
__lowercase= MgpstrProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
__lowercase= self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
__lowercase= self.get_image_processor(do_normalize=lowerCAmelCase , padding_value=1.0 )
__lowercase= MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=lowerCAmelCase , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , lowerCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCAmelCase )
def _A (self ):
__lowercase= self.get_image_processor()
__lowercase= self.get_tokenizer()
__lowercase= MgpstrProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase )
__lowercase= self.prepare_image_inputs()
__lowercase= image_processor(lowerCAmelCase , return_tensors='np' )
__lowercase= processor(images=lowerCAmelCase , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _A (self ):
__lowercase= self.get_image_processor()
__lowercase= self.get_tokenizer()
__lowercase= MgpstrProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase )
__lowercase= 'test'
__lowercase= processor(text=lowerCAmelCase )
__lowercase= tokenizer(lowerCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _A (self ):
__lowercase= self.get_image_processor()
__lowercase= self.get_tokenizer()
__lowercase= MgpstrProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase )
__lowercase= 'test'
__lowercase= self.prepare_image_inputs()
__lowercase= processor(text=lowerCAmelCase , images=lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'labels'] )
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase ):
processor()
def _A (self ):
__lowercase= self.get_image_processor()
__lowercase= self.get_tokenizer()
__lowercase= MgpstrProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase )
__lowercase= [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
__lowercase= processor.char_decode(lowerCAmelCase )
__lowercase= tokenizer.batch_decode(lowerCAmelCase )
__lowercase= [seq.replace(' ' , '' ) for seq in decoded_tok]
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
def _A (self ):
__lowercase= self.get_image_processor()
__lowercase= self.get_tokenizer()
__lowercase= MgpstrProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase )
__lowercase= None
__lowercase= self.prepare_image_inputs()
__lowercase= processor(text=lowerCAmelCase , images=lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def _A (self ):
__lowercase= self.get_image_processor()
__lowercase= self.get_tokenizer()
__lowercase= MgpstrProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase )
__lowercase= torch.randn(1 , 2_7 , 3_8 )
__lowercase= torch.randn(1 , 2_7 , 5_0_2_5_7 )
__lowercase= torch.randn(1 , 2_7 , 3_0_5_2_2 )
__lowercase= processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ['generated_text', 'scores', 'char_preds', 'bpe_preds', 'wp_preds'] )
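# Standalone sketch of the character vocabulary wired up in setUp above: token
# ids are simply positions in the vocab list, so encode/decode is a pair of
# dict lookups.
_vocab = ['[GO]', '[s]'] + list('0123456789abcdefghijklmnopqrstuvwxyz')
_char_to_id = {ch: i for i, ch in enumerate(_vocab)}
_id_to_char = {i: ch for ch, i in _char_to_id.items()}
assert [_char_to_id[c] for c in 'test'] == [31, 16, 30, 31]
assert ''.join(_id_to_char[i] for i in [31, 16, 30, 31]) == 'test'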
| 304
|
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    '''Return True if the matrix equals its own conjugate transpose.'''
    return np.array_equal(matrix, matrix.conjugate().T)

def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    '''Compute the Rayleigh quotient (v* A v) / (v* v).'''
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))

def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), F'{a} is not hermitian.'
    print(rayleigh_quotient(a, v))
    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), F'{a} is not hermitian.'
    assert rayleigh_quotient(a, v) == float(3)
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
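# Property-check sketch: for a Hermitian matrix the Rayleigh quotient always lies
# between the smallest and largest eigenvalues (the min-max theorem), which gives
# a cheap extra sanity check on the functions above.
_a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
_v = np.array([[1.0], [2.0], [3.0]])
_eigenvalues = np.linalg.eigvalsh(_a)
_r = complex(rayleigh_quotient(_a, _v)[0, 0]).real
assert _eigenvalues.min() - 1e-9 <= _r <= _eigenvalues.max() + 1e-9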
| 304
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowerCAmelCase = {
'''configuration_tapas''': ['''TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TapasConfig'''],
'''tokenization_tapas''': ['''TapasTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
'''TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TapasForMaskedLM''',
'''TapasForQuestionAnswering''',
'''TapasForSequenceClassification''',
'''TapasModel''',
'''TapasPreTrainedModel''',
'''load_tf_weights_in_tapas''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
'''TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFTapasForMaskedLM''',
'''TFTapasForQuestionAnswering''',
'''TFTapasForSequenceClassification''',
'''TFTapasModel''',
'''TFTapasPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
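# Minimal sketch of the deferred-import idea behind _LazyModule: a module-level
# __getattr__ (PEP 562) that only imports the heavy submodule when one of its
# names is first accessed. The mapping here is hypothetical and for illustration;
# this is not the actual transformers implementation.
import importlib

_lazy_targets = {'sqrt': 'math', 'dataclass': 'dataclasses'}

def __getattr__(name):
    if name in _lazy_targets:
        return getattr(importlib.import_module(_lazy_targets[name]), name)
    raise AttributeError(f'module {__name__!r} has no attribute {name!r}')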
| 304
|
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
lowerCAmelCase = logging.get_logger(__name__)
class A ( A_ ):
UpperCamelCase_ : Dict =['''audio_values''', '''audio_mask''']
def __init__(self , lowerCAmelCase=2_0_4_8 , lowerCAmelCase=1 , lowerCAmelCase=[1_6, 1_6] , lowerCAmelCase=1_2_8 , lowerCAmelCase=4_4_1_0_0 , lowerCAmelCase=8_6 , lowerCAmelCase=2_0_4_8 , lowerCAmelCase=0.0 , **lowerCAmelCase , ):
super().__init__(
feature_size=lowerCAmelCase , sampling_rate=lowerCAmelCase , padding_value=lowerCAmelCase , **lowerCAmelCase , )
__lowercase= spectrogram_length
__lowercase= num_channels
__lowercase= patch_size
__lowercase= feature_size // self.patch_size[1]
__lowercase= n_fft
__lowercase= sampling_rate // hop_length_to_sampling_rate
__lowercase= sampling_rate
__lowercase= padding_value
__lowercase= mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=lowerCAmelCase , min_frequency=0.0 , max_frequency=2_20_50.0 , sampling_rate=lowerCAmelCase , norm='slaney' , mel_scale='slaney' , ).T
def _A (self , lowerCAmelCase ):
__lowercase= spectrogram(
lowerCAmelCase , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='dB' , db_range=80.0 , )
__lowercase= log_spec[:, :-1]
__lowercase= log_spec - 20.0
__lowercase= np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__(self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = False , lowerCAmelCase = False , **lowerCAmelCase , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
'This feature extractor is set to support sampling rate'
f' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'
f' with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
__lowercase= isinstance(lowerCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
__lowercase= is_batched_numpy or (
isinstance(lowerCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__lowercase= [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(lowerCAmelCase , np.ndarray ):
__lowercase= np.asarray(lowerCAmelCase , dtype=np.floataa )
elif isinstance(lowerCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__lowercase= raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__lowercase= [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
__lowercase= [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , lowerCAmelCase ):
__lowercase= [np.asarray(lowerCAmelCase , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
__lowercase= max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
__lowercase= [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
__lowercase= np.array(lowerCAmelCase ).astype(np.floataa )
# convert into correct format for padding
__lowercase= max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
__lowercase= np.ones([len(lowerCAmelCase ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
__lowercase= padded_audio_features * self.padding_value
for i in range(len(lowerCAmelCase ) ):
__lowercase= audio_features[i]
__lowercase= feature
# return as BatchFeature
if return_attention_mask:
__lowercase= {'audio_values': padded_audio_features, 'audio_mask': audio_mask}
else:
__lowercase= {'audio_values': padded_audio_features}
__lowercase= BatchFeature(data=lowerCAmelCase , tensor_type=lowerCAmelCase )
return encoded_inputs
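# Numeric sketch of the normalization in _np_extract_fbank_features above: with
# db_range 80 the log spectrogram lies in [peak - 80, peak] dB; shifting by -20
# and scaling by 1/40 with a clip to [-2, 0], then adding 1, maps roughly
# [-60, 20] dB onto [-1, 1].
_db = np.array([-60.0, -20.0, 20.0])
_normed = np.clip((_db - 20.0) / 40.0, -2.0, 0.0) + 1.0
assert _normed.tolist() == [-1.0, 0.0, 1.0]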
| 304
| 1
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class A ( A_ ):
UpperCamelCase_ : Tuple =(
'''This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'''
'''It takes two arguments named `image` which should be the original image, and `label` which should be a text '''
'''describing the elements what should be identified in the segmentation mask. The tool returns the mask.'''
)
UpperCamelCase_ : str ='''CIDAS/clipseg-rd64-refined'''
UpperCamelCase_ : Union[str, Any] ='''image_segmenter'''
UpperCamelCase_ : int =CLIPSegForImageSegmentation
UpperCamelCase_ : Tuple =['''image''', '''text''']
UpperCamelCase_ : Dict =['''image''']
def __init__(self , *lowerCAmelCase , **lowerCAmelCase ):
requires_backends(self , ['vision'] )
super().__init__(*lowerCAmelCase , **lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase ):
return self.pre_processor(text=[label] , images=[image] , padding=lowerCAmelCase , return_tensors='pt' )
def _A (self , lowerCAmelCase ):
with torch.no_grad():
__lowercase= self.model(**lowerCAmelCase ).logits
return logits
def _A (self , lowerCAmelCase ):
__lowercase= outputs.cpu().detach().numpy()
__lowercase= 0
__lowercase= 1
return Image.fromarray((array * 2_5_5).astype(np.uinta ) )
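# Standalone sketch of the decode step above: segmentation logits are binarized
# by sign and rescaled to an 8-bit mask image (the values here are illustrative).
_logits = np.array([[-1.3, 0.2], [2.1, -0.5]])
_mask = (_logits > 0).astype(np.uint8) * 255
assert _mask.tolist() == [[0, 255], [255, 0]]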
| 304
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)

def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))

def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, F'{gathered_obj}, {len(gathered_obj)} != {state.num_processes}'
    assert gathered_obj == list(range(state.num_processes)), F'{gathered_obj} != {list(range(state.num_processes))}'

def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))

def test_pad_across_processes(state):
    # pad the main process's tensor with one extra element so that padding
    # across processes is actually exercised
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]

def test_reduce_sum(state):
    # for now, this test runs only on two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, 'sum')
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), F'{reduced_tensor} != {truth_tensor}'

def test_reduce_mean(state):
    # for now, this test runs only on two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, 'mean')
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), F'{reduced_tensor} != {truth_tensor}'

def _mp_fn(index):
    # entry point used by TPU launchers
    main()

def main():
    state = PartialState()
    state.print(F'State: {state}')
    state.print('testing gather')
    test_gather(state)
    state.print('testing gather_object')
    test_gather_object(state)
    state.print('testing broadcast')
    test_broadcast(state)
    state.print('testing pad_across_processes')
    test_pad_across_processes(state)
    state.print('testing reduce_sum')
    test_reduce_sum(state)
    state.print('testing reduce_mean')
    test_reduce_mean(state)
if __name__ == "__main__":
main()
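# Single-machine sketch of what gather does across ranks: every process
# contributes its own shard and all processes receive the concatenation. With two
# processes and the create_tensor pattern above, the result is [1, 2, 3, 4].
def _simulated_gather(num_processes):
    shards = [torch.arange(num_processes) + 1.0 + num_processes * rank for rank in range(num_processes)]
    return torch.cat(shards)

assert _simulated_gather(2).tolist() == [1.0, 2.0, 3.0, 4.0]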
| 304
| 1
|
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -1_1, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 1_0, 3, -2)
@dataclass
class Node:
    data: int
    next_node: Node | None

class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])

def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    '''Merge two sorted linked lists into a new sorted linked list.'''
    return SortedLinkedList(list(sll_one) + list(sll_two))
if __name__ == "__main__":
import doctest
doctest.testmod()
SSL = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 304
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class A ( A_ ):
UpperCamelCase_ : torch.FloatTensor
UpperCamelCase_ : torch.FloatTensor
class A ( A_ , A_ ):
UpperCamelCase_ : Dict =1
@register_to_config
def __init__(self , lowerCAmelCase = 2_0_0_0 , lowerCAmelCase = 0.15 , lowerCAmelCase = 0.01 , lowerCAmelCase = 13_48.0 , lowerCAmelCase = 1E-5 , lowerCAmelCase = 1 , ):
# standard deviation of the initial noise distribution
__lowercase= sigma_max
# setable values
__lowercase= None
self.set_sigmas(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase = None ):
return sample
def _A (self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None ):
__lowercase= sampling_eps if sampling_eps is not None else self.config.sampling_eps
__lowercase= torch.linspace(1 , lowerCAmelCase , lowerCAmelCase , device=lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None ):
__lowercase= sigma_min if sigma_min is not None else self.config.sigma_min
__lowercase= sigma_max if sigma_max is not None else self.config.sigma_max
__lowercase= sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(lowerCAmelCase , lowerCAmelCase )
__lowercase= sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
__lowercase= torch.exp(torch.linspace(math.log(lowerCAmelCase ) , math.log(lowerCAmelCase ) , lowerCAmelCase ) )
__lowercase= torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def _A (self , lowerCAmelCase , lowerCAmelCase ):
return torch.where(
timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = True , ):
if self.timesteps is None:
raise ValueError(
'`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )
__lowercase= timestep * torch.ones(
sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
__lowercase= (timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
__lowercase= timesteps.to(self.discrete_sigmas.device )
__lowercase= self.discrete_sigmas[timesteps].to(sample.device )
__lowercase= self.get_adjacent_sigma(lowerCAmelCase , lowerCAmelCase ).to(sample.device )
__lowercase= torch.zeros_like(lowerCAmelCase )
__lowercase= (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
__lowercase= diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
__lowercase= diffusion.unsqueeze(-1 )
__lowercase= drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
__lowercase= randn_tensor(
sample.shape , layout=sample.layout , generator=lowerCAmelCase , device=sample.device , dtype=sample.dtype )
__lowercase= sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
__lowercase= prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=lowerCAmelCase , prev_sample_mean=lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = True , ):
if self.timesteps is None:
raise ValueError(
'`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
__lowercase= randn_tensor(sample.shape , layout=sample.layout , generator=lowerCAmelCase ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
__lowercase= torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
__lowercase= torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
__lowercase= (self.config.snr * noise_norm / grad_norm) ** 2 * 2
__lowercase= step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
__lowercase= step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
__lowercase= step_size.unsqueeze(-1 )
__lowercase= sample + step_size * model_output
__lowercase= prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
__lowercase= timesteps.to(original_samples.device )
__lowercase= self.discrete_sigmas.to(original_samples.device )[timesteps]
__lowercase= (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(lowerCAmelCase ) * sigmas[:, None, None, None]
)
__lowercase= noise + original_samples
return noisy_samples
def __len__(self ):
return self.config.num_train_timesteps
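# Schematic NumPy sketch of the predictor update in step_pred above: one reverse
# Euler-Maruyama step x_prev = x - drift + g * z, with drift = -g**2 * score and
# diffusion g = sqrt(sigma_t**2 - sigma_{t-1}**2). Toy values only; not part of
# the scheduler API.
import numpy as np

def _predictor_step(x, score, sigma_t, sigma_prev, z):
    g = (sigma_t**2 - sigma_prev**2) ** 0.5
    drift = -(g**2) * score
    return x - drift + g * z

# With a zero noise draw the update is pure drift: 0 - (-3 * 1) = 3 per element.
print(_predictor_step(np.zeros(3), np.ones(3), 2.0, 1.0, np.zeros(3)))  # [3. 3. 3.]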
| 304
| 1
|
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class A ( A_ , A_ , unittest.TestCase ):
UpperCamelCase_ : str =StableDiffusionDiffEditPipeline
UpperCamelCase_ : str =TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''}
UpperCamelCase_ : Optional[int] =TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''}
UpperCamelCase_ : Optional[int] =frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
UpperCamelCase_ : Dict =frozenset([] )
def _A (self ):
torch.manual_seed(0 )
__lowercase= UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=lowerCAmelCase , )
__lowercase= DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=lowerCAmelCase , set_alpha_to_one=lowerCAmelCase , )
__lowercase= DDIMInverseScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=lowerCAmelCase , set_alpha_to_zero=lowerCAmelCase , )
torch.manual_seed(0 )
__lowercase= AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
__lowercase= CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='gelu' , projection_dim=5_1_2 , )
__lowercase= CLIPTextModel(lowerCAmelCase )
__lowercase= CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
__lowercase= {
'unet': unet,
'scheduler': scheduler,
'inverse_scheduler': inverse_scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def _A (self , lowerCAmelCase , lowerCAmelCase=0 ):
__lowercase= floats_tensor((1, 1_6, 1_6) , rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase )
__lowercase= floats_tensor((1, 2, 4, 1_6, 1_6) , rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase )
if str(lowerCAmelCase ).startswith('mps' ):
__lowercase= torch.manual_seed(lowerCAmelCase )
else:
__lowercase= torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
__lowercase= {
'prompt': 'a dog and a newt',
'mask_image': mask,
'image_latents': latents,
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def _A (self , lowerCAmelCase , lowerCAmelCase=0 ):
__lowercase= floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase )
__lowercase= image.cpu().permute(0 , 2 , 3 , 1 )[0]
__lowercase= Image.fromarray(np.uinta(lowerCAmelCase ) ).convert('RGB' )
if str(lowerCAmelCase ).startswith('mps' ):
__lowercase= torch.manual_seed(lowerCAmelCase )
else:
__lowercase= torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
__lowercase= {
'image': image,
'source_prompt': 'a cat and a frog',
'target_prompt': 'a dog and a newt',
'generator': generator,
'num_inference_steps': 2,
'num_maps_per_mask': 2,
'mask_encode_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def _A (self , lowerCAmelCase , lowerCAmelCase=0 ):
__lowercase= floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase )
__lowercase= image.cpu().permute(0 , 2 , 3 , 1 )[0]
__lowercase= Image.fromarray(np.uinta(lowerCAmelCase ) ).convert('RGB' )
if str(lowerCAmelCase ).startswith('mps' ):
__lowercase= torch.manual_seed(lowerCAmelCase )
else:
__lowercase= torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
__lowercase= {
'image': image,
'prompt': 'a cat and a frog',
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'decode_latents': True,
'output_type': 'numpy',
}
return inputs
def _A (self ):
if not hasattr(self.pipeline_class , '_optional_components' ):
return
__lowercase= self.get_dummy_components()
__lowercase= self.pipeline_class(**lowerCAmelCase )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
__lowercase= self.get_dummy_inputs(lowerCAmelCase )
__lowercase= pipe(**lowerCAmelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(lowerCAmelCase )
__lowercase= self.pipeline_class.from_pretrained(lowerCAmelCase )
pipe_loaded.to(lowerCAmelCase )
pipe_loaded.set_progress_bar_config(disable=lowerCAmelCase )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(lowerCAmelCase , lowerCAmelCase ) is None , f'`{optional_component}` did not stay set to None after loading.' , )
__lowercase= self.get_dummy_inputs(lowerCAmelCase )
__lowercase= pipe_loaded(**lowerCAmelCase )[0]
__lowercase= np.abs(output - output_loaded ).max()
self.assertLess(lowerCAmelCase , 1E-4 )
def _A (self ):
__lowercase= 'cpu'
__lowercase= self.get_dummy_components()
__lowercase= self.pipeline_class(**lowerCAmelCase )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
__lowercase= self.get_dummy_mask_inputs(lowerCAmelCase )
__lowercase= pipe.generate_mask(**lowerCAmelCase )
__lowercase= mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 1_6, 1_6) )
__lowercase= np.array([0] * 9 )
__lowercase= np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowerCAmelCase , 1E-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def _A (self ):
__lowercase= 'cpu'
__lowercase= self.get_dummy_components()
__lowercase= self.pipeline_class(**lowerCAmelCase )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
__lowercase= self.get_dummy_inversion_inputs(lowerCAmelCase )
__lowercase= pipe.invert(**lowerCAmelCase ).images
__lowercase= image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 3_2, 3_2, 3) )
__lowercase= np.array(
[0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99] , )
__lowercase= np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowerCAmelCase , 1E-3 )
def _A (self ):
super().test_inference_batch_single_identical(expected_max_diff=5E-3 )
def _A (self ):
__lowercase= 'cpu'
__lowercase= self.get_dummy_components()
__lowercase= {'beta_start': 0.0_00_85, 'beta_end': 0.0_12, 'beta_schedule': 'scaled_linear'}
__lowercase= DPMSolverMultistepScheduler(**lowerCAmelCase )
__lowercase= DPMSolverMultistepInverseScheduler(**lowerCAmelCase )
__lowercase= self.pipeline_class(**lowerCAmelCase )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
__lowercase= self.get_dummy_inversion_inputs(lowerCAmelCase )
__lowercase= pipe.invert(**lowerCAmelCase ).images
__lowercase= image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 3_2, 3_2, 3) )
__lowercase= np.array(
[0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99] , )
__lowercase= np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowerCAmelCase , 1E-3 )
@require_torch_gpu
@slow
class A ( unittest.TestCase ):
def _A (self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def _A (cls ):
__lowercase= load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png' )
__lowercase= raw_image.convert('RGB' ).resize((7_6_8, 7_6_8) )
__lowercase= raw_image
def _A (self ):
__lowercase= torch.manual_seed(0 )
__lowercase= StableDiffusionDiffEditPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-1' , safety_checker=lowerCAmelCase , torch_dtype=torch.floataa )
__lowercase= DDIMScheduler.from_config(pipe.scheduler.config )
__lowercase= DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=lowerCAmelCase )
__lowercase= 'a bowl of fruit'
__lowercase= 'a bowl of pears'
__lowercase= pipe.generate_mask(
image=self.raw_image , source_prompt=lowerCAmelCase , target_prompt=lowerCAmelCase , generator=lowerCAmelCase , )
__lowercase= pipe.invert(
prompt=lowerCAmelCase , image=self.raw_image , inpaint_strength=0.7 , generator=lowerCAmelCase ).latents
__lowercase= pipe(
prompt=lowerCAmelCase , mask_image=lowerCAmelCase , image_latents=lowerCAmelCase , generator=lowerCAmelCase , negative_prompt=lowerCAmelCase , inpaint_strength=0.7 , output_type='numpy' , ).images[0]
__lowercase= (
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png' ).resize((7_6_8, 7_6_8) ) )
/ 2_5_5
)
assert np.abs((expected_image - image).max() ) < 5E-1
def _A (self ):
__lowercase= torch.manual_seed(0 )
__lowercase= StableDiffusionDiffEditPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-1' , safety_checker=lowerCAmelCase , torch_dtype=torch.floataa )
__lowercase= DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
__lowercase= DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=lowerCAmelCase )
__lowercase= 'a bowl of fruit'
__lowercase= 'a bowl of pears'
__lowercase= pipe.generate_mask(
image=self.raw_image , source_prompt=lowerCAmelCase , target_prompt=lowerCAmelCase , generator=lowerCAmelCase , )
__lowercase= pipe.invert(
prompt=lowerCAmelCase , image=self.raw_image , inpaint_strength=0.7 , generator=lowerCAmelCase , num_inference_steps=2_5 , ).latents
__lowercase= pipe(
prompt=lowerCAmelCase , mask_image=lowerCAmelCase , image_latents=lowerCAmelCase , generator=lowerCAmelCase , negative_prompt=lowerCAmelCase , inpaint_strength=0.7 , num_inference_steps=2_5 , output_type='numpy' , ).images[0]
__lowercase= (
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png' ).resize((7_6_8, 7_6_8) ) )
/ 2_5_5
)
assert np.abs((expected_image - image).max() ) < 5E-1
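# --- Added summary (not part of the original test file) ---
# The two slow tests above exercise DiffEdit's three-stage workflow:
#   1. mask_image  = pipe.generate_mask(image=..., source_prompt=..., target_prompt=...)
#      (contrasts noise predictions under the two prompts to localize the edit)
#   2. inv_latents = pipe.invert(prompt=source_prompt, image=..., inpaint_strength=...).latents
#      (DDIM / DPM-Solver inversion of the input image into partially noised latents)
#   3. edited      = pipe(prompt=target_prompt, mask_image=mask_image,
#                         image_latents=inv_latents, ...).images[0]
#      (denoises toward the target prompt, editing only inside the mask)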
| 304
|
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
lowerCAmelCase = False
class A ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class A ( unittest.TestCase ):
def _A (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _A (self ):
__lowercase= VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
__lowercase= load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
__lowercase= torch.manual_seed(0 )
__lowercase= pipe.dual_guided(
prompt='first prompt' , image=lowerCAmelCase , text_to_image_strength=0.75 , generator=lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCAmelCase )
__lowercase= VersatileDiffusionPipeline.from_pretrained(lowerCAmelCase , torch_dtype=torch.floataa )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
__lowercase= generator.manual_seed(0 )
__lowercase= pipe.dual_guided(
prompt='first prompt' , image=lowerCAmelCase , text_to_image_strength=0.75 , generator=lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def _A (self ):
__lowercase= VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
__lowercase= 'cyberpunk 2077'
__lowercase= load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
__lowercase= torch.manual_seed(0 )
__lowercase= pipe.dual_guided(
prompt=lowerCAmelCase , image=lowerCAmelCase , text_to_image_strength=0.75 , generator=lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='numpy' , ).images
__lowercase= image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase= np.array([0.14_48, 0.16_19, 0.17_41, 0.10_86, 0.11_47, 0.11_28, 0.11_99, 0.11_65, 0.10_01] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
__lowercase= 'A painting of a squirrel eating a burger '
__lowercase= torch.manual_seed(0 )
__lowercase= pipe.text_to_image(
prompt=lowerCAmelCase , generator=lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='numpy' ).images
__lowercase= image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase= np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
__lowercase= pipe.image_variation(lowerCAmelCase , generator=lowerCAmelCase , output_type='numpy' ).images
__lowercase= image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase= np.array([0.30_76, 0.31_23, 0.32_84, 0.37_82, 0.37_70, 0.38_94, 0.42_97, 0.43_31, 0.44_56] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 304
| 1
|
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ ) -> int:
'''simple docstring'''
if exponent == 1:
return base
if exponent % 2 == 0:
__lowercase= _modexpt(lowercase__ , exponent // 2 , lowercase__ ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(lowercase__ , exponent - 1 , lowercase__ )) % modulo_value
def _lowerCamelCase( lowercase__ = 1_7_7_7 , lowercase__ = 1_8_5_5 , lowercase__ = 8 ) -> int:
'''simple docstring'''
__lowercase= base
for _ in range(1 , lowercase__ ):
__lowercase= _modexpt(lowercase__ , lowercase__ , 1_0**digits )
return result
if __name__ == "__main__":
print(F'{solution() = }')
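# --- Added note (not part of the original file) ---
# _modexpt is recursive square-and-multiply modular exponentiation, so for any
# exponent >= 1 it agrees with Python's built-in three-argument pow:
#     _modexpt(1777, 1855, 10**8) == pow(1777, 1855, 10**8)
# solution() then iterates it `height` times to obtain the last `digits` digits
# of the hyperexponentiation (tetration) 1777^^1855 -- Project Euler problem 188.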
| 304
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase = {
'''configuration_xmod''': [
'''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XmodConfig''',
'''XmodOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
'''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XmodForCausalLM''',
'''XmodForMaskedLM''',
'''XmodForMultipleChoice''',
'''XmodForQuestionAnswering''',
'''XmodForSequenceClassification''',
'''XmodForTokenClassification''',
'''XmodModel''',
'''XmodPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 304
| 1
|
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
lowerCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class A ( A_ , A_ ):
@register_to_config
def __init__(self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None ):
super().__init__()
__lowercase= learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
__lowercase= torch.zeros(lowerCAmelCase , lowerCAmelCase )
else:
__lowercase= None
__lowercase= torch.nn.Parameter(lowerCAmelCase )
class A ( A_ ):
UpperCamelCase_ : VQModel
UpperCamelCase_ : CLIPTextModel
UpperCamelCase_ : CLIPTokenizer
UpperCamelCase_ : TransformeraDModel
UpperCamelCase_ : LearnedClassifierFreeSamplingEmbeddings
UpperCamelCase_ : VQDiffusionScheduler
def __init__(self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
super().__init__()
self.register_modules(
vqvae=lowerCAmelCase , transformer=lowerCAmelCase , text_encoder=lowerCAmelCase , tokenizer=lowerCAmelCase , scheduler=lowerCAmelCase , learned_classifier_free_sampling_embeddings=lowerCAmelCase , )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= len(lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else 1
# get prompt text embeddings
__lowercase= self.tokenizer(
lowerCAmelCase , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
__lowercase= text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__lowercase= self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
f' {self.tokenizer.model_max_length} tokens: {removed_text}' )
__lowercase= text_input_ids[:, : self.tokenizer.model_max_length]
__lowercase= self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
__lowercase= prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=lowerCAmelCase )
# duplicate text embeddings for each generation per prompt
__lowercase= prompt_embeds.repeat_interleave(lowerCAmelCase , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
__lowercase= self.learned_classifier_free_sampling_embeddings.embeddings
__lowercase= negative_prompt_embeds.unsqueeze(0 ).repeat(lowerCAmelCase , 1 , 1 )
else:
__lowercase= [''] * batch_size
__lowercase= text_input_ids.shape[-1]
__lowercase= self.tokenizer(
lowerCAmelCase , padding='max_length' , max_length=lowerCAmelCase , truncation=lowerCAmelCase , return_tensors='pt' , )
__lowercase= self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
__lowercase= negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=lowerCAmelCase )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__lowercase= negative_prompt_embeds.shape[1]
__lowercase= negative_prompt_embeds.repeat(1 , lowerCAmelCase , 1 )
__lowercase= negative_prompt_embeds.view(batch_size * num_images_per_prompt , lowerCAmelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__lowercase= torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__(self , lowerCAmelCase , lowerCAmelCase = 1_0_0 , lowerCAmelCase = 5.0 , lowerCAmelCase = 1.0 , lowerCAmelCase = 1 , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = "pil" , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = 1 , ):
if isinstance(lowerCAmelCase , lowerCAmelCase ):
__lowercase= 1
elif isinstance(lowerCAmelCase , lowerCAmelCase ):
__lowercase= len(lowerCAmelCase )
else:
raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(lowerCAmelCase )}' )
__lowercase= batch_size * num_images_per_prompt
__lowercase= guidance_scale > 1.0
__lowercase= self._encode_prompt(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowerCAmelCase , lowerCAmelCase ) or callback_steps <= 0)
):
raise ValueError(
f'`callback_steps` has to be a positive integer but is {callback_steps} of type'
f' {type(lowerCAmelCase )}.' )
# get the initial completely masked latents unless the user supplied it
__lowercase= (batch_size, self.transformer.num_latent_pixels)
if latents is None:
__lowercase= self.transformer.num_vector_embeds - 1
__lowercase= torch.full(lowerCAmelCase , lowerCAmelCase ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
'Unexpected latents value(s). All latents must be valid embedding indices, i.e. in the range 0,'
f' {self.transformer.num_vector_embeds - 1} (inclusive).' )
__lowercase= latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(lowerCAmelCase , device=self.device )
__lowercase= self.scheduler.timesteps.to(self.device )
__lowercase= latents
for i, t in enumerate(self.progress_bar(lowerCAmelCase ) ):
# expand the sample if we are doing classifier free guidance
__lowercase= torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
__lowercase= self.transformer(lowerCAmelCase , encoder_hidden_states=lowerCAmelCase , timestep=lowerCAmelCase ).sample
if do_classifier_free_guidance:
__lowercase, __lowercase= model_output.chunk(2 )
__lowercase= model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(lowerCAmelCase , dim=1 , keepdim=lowerCAmelCase )
__lowercase= self.truncate(lowerCAmelCase , lowerCAmelCase )
# remove `log(0)`'s (`-inf`s)
__lowercase= model_output.clamp(-7_0 )
# compute the previous noisy sample x_t -> x_t-1
__lowercase= self.scheduler.step(lowerCAmelCase , timestep=lowerCAmelCase , sample=lowerCAmelCase , generator=lowerCAmelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
__lowercase= self.vqvae.config.vq_embed_dim
__lowercase= (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
__lowercase= self.vqvae.quantize.get_codebook_entry(lowerCAmelCase , shape=lowerCAmelCase )
__lowercase= self.vqvae.decode(lowerCAmelCase , force_not_quantize=lowerCAmelCase ).sample
__lowercase= (image / 2 + 0.5).clamp(0 , 1 )
__lowercase= image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__lowercase= self.numpy_to_pil(lowerCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase ):
__lowercase, __lowercase= torch.sort(lowerCAmelCase , 1 , descending=lowerCAmelCase )
__lowercase= torch.exp(lowerCAmelCase )
__lowercase= sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
__lowercase= torch.full_like(keep_mask[:, 0:1, :] , lowerCAmelCase )
__lowercase= torch.cat((all_true, keep_mask) , dim=1 )
__lowercase= keep_mask[:, :-1, :]
__lowercase= keep_mask.gather(1 , indices.argsort(1 ) )
__lowercase= log_p_x_0.clone()
__lowercase= -torch.inf # -inf = log(0)
return rv
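# --- Added sketch (not part of the original pipeline): the truncate step in
# isolation. Sort token log-probs, keep the smallest prefix whose cumulative
# probability stays below truncation_rate (always keeping the arg-max), and
# clamp everything else to log(0) = -inf. Toy values only.
import torch

log_p_x_0 = torch.log(torch.tensor([[0.55, 0.25, 0.15, 0.05]]))
truncation_rate = 0.7
sorted_log_p, indices = torch.sort(log_p_x_0, dim=-1, descending=True)
keep_mask = torch.exp(sorted_log_p).cumsum(dim=-1) < truncation_rate
# shift right so the largest-probability token is always kept
keep_mask = torch.cat([torch.ones_like(keep_mask[..., :1]), keep_mask[..., :-1]], dim=-1)
truncated_sorted = torch.where(keep_mask, sorted_log_p, torch.full_like(sorted_log_p, float('-inf')))
# scatter back to the original token order -> here only 0.55 and 0.25 survive
truncated = torch.empty_like(log_p_x_0).scatter_(-1, indices, truncated_sorted)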
| 304
|
import math
from datetime import datetime, timedelta
def _lowerCamelCase( lowercase__ ) -> datetime:
'''simple docstring'''
__lowercase= year % 1_9
__lowercase= year % 4
__lowercase= year % 7
__lowercase= math.floor(year / 1_0_0 )
__lowercase= math.floor((1_3 + 8 * leap_day_inhibits) / 2_5 )
__lowercase= leap_day_inhibits / 4
__lowercase= (
1_5 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
) % 3_0
__lowercase= (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
# days to be added to March 21
__lowercase= (1_9 * metonic_cycle + secular_moon_shift) % 3_0
# PHM -> Paschal Full Moon
__lowercase= (
2 * julian_leap_year
+ 4 * non_leap_year
+ 6 * days_to_add
+ century_starting_point
) % 7
if days_to_add == 2_9 and days_from_phm_to_sunday == 6:
return datetime(lowercase__ , 4 , 1_9 )
elif days_to_add == 2_8 and days_from_phm_to_sunday == 6:
return datetime(lowercase__ , 4 , 1_8 )
else:
return datetime(lowercase__ , 3 , 2_2 ) + timedelta(
days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
for year in (1_9_9_4, 2_0_0_0, 2_0_1_0, 2_0_2_1, 2_0_2_3):
lowerCAmelCase = '''will be''' if year > datetime.now().year else '''was'''
print(F'Easter in {year} {tense} {gauss_easter(year)}')
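# --- Added worked example (not in the original file), year = 2023 ---
# metonic_cycle = 2023 % 19 = 9           julian_leap_year = 2023 % 4 = 3
# non_leap_year = 2023 % 7 = 0            leap_day_inhibits = floor(2023/100) = 20
# lunar_orbit_correction = floor((13 + 8*20) / 25) = 6
# leap_day_reinstall_number = 20 / 4 = 5.0
# secular_moon_shift = (15 - 6 + 20 - 5) % 30 = 24
# century_starting_point = (4 + 20 - 5) % 7 = 5
# days_to_add = (19*9 + 24) % 30 = 15
# days_from_phm_to_sunday = (2*3 + 4*0 + 6*15 + 5) % 7 = 3
# Easter = March 22 + (15 + 3) days = April 9, 2023 (the actual date that year).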
| 304
| 1
|
from collections.abc import Callable
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ ) -> float:
'''simple docstring'''
__lowercase= a
__lowercase= b
if function(lowercase__ ) == 0: # either a or b is already a root of the function
return a
elif function(lowercase__ ) == 0:
return b
elif (
function(lowercase__ ) * function(lowercase__ ) > 0
): # if neither endpoint is a root and f(a) and f(b) share the same sign,
# then there is no guaranteed sign change and bisection cannot find a root
raise ValueError('could not find root in given interval.' )
else:
__lowercase= start + (end - start) / 2.0
while abs(start - mid ) > 1_0**-7: # until the bracket width shrinks below 10^-7
if function(lowercase__ ) == 0:
return mid
elif function(lowercase__ ) * function(lowercase__ ) < 0:
__lowercase= mid
else:
__lowercase= mid
__lowercase= start + (end - start) / 2.0
return mid
def _lowerCamelCase( lowercase__ ) -> float:
'''simple docstring'''
return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1_0_0_0))
import doctest
doctest.testmod()
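# --- Added usage note (not part of the original file) ---
# Bisection needs a sign change on the interval: for g(x) = x**2 - 4,
# g(1) = -3 < 0 and g(3) = 5 > 0 bracket the root, so
#     bisection(lambda x: x**2 - 4, 1, 3)
# converges to ~2.0 (the loop keeps halving the bracket until it is below 1e-7).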
| 304
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json''',
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class A ( A_ ):
UpperCamelCase_ : Optional[int] ='''blenderbot-small'''
UpperCamelCase_ : Optional[Any] =['''past_key_values''']
UpperCamelCase_ : Optional[int] ={'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__(self , lowerCAmelCase=5_0_2_6_5 , lowerCAmelCase=5_1_2 , lowerCAmelCase=8 , lowerCAmelCase=2_0_4_8 , lowerCAmelCase=1_6 , lowerCAmelCase=8 , lowerCAmelCase=2_0_4_8 , lowerCAmelCase=1_6 , lowerCAmelCase=0.0 , lowerCAmelCase=0.0 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase="gelu" , lowerCAmelCase=5_1_2 , lowerCAmelCase=0.1 , lowerCAmelCase=0.0 , lowerCAmelCase=0.0 , lowerCAmelCase=0.02 , lowerCAmelCase=1 , lowerCAmelCase=False , lowerCAmelCase=0 , lowerCAmelCase=1 , lowerCAmelCase=2 , lowerCAmelCase=2 , **lowerCAmelCase , ):
__lowercase= vocab_size
__lowercase= max_position_embeddings
__lowercase= d_model
__lowercase= encoder_ffn_dim
__lowercase= encoder_layers
__lowercase= encoder_attention_heads
__lowercase= decoder_ffn_dim
__lowercase= decoder_layers
__lowercase= decoder_attention_heads
__lowercase= dropout
__lowercase= attention_dropout
__lowercase= activation_dropout
__lowercase= activation_function
__lowercase= init_std
__lowercase= encoder_layerdrop
__lowercase= decoder_layerdrop
__lowercase= use_cache
__lowercase= encoder_layers
__lowercase= scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , is_encoder_decoder=lowerCAmelCase , decoder_start_token_id=lowerCAmelCase , forced_eos_token_id=lowerCAmelCase , **lowerCAmelCase , )
class A ( A_ ):
@property
def _A (self ):
if self.task in ["default", "seq2seq-lm"]:
__lowercase= OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
__lowercase= {0: 'batch'}
__lowercase= {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
__lowercase= {0: 'batch', 1: 'decoder_sequence'}
__lowercase= {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase , direction='inputs' )
elif self.task == "causal-lm":
# TODO: figure this case out.
__lowercase= OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
__lowercase, __lowercase= self.num_layers
for i in range(lowerCAmelCase ):
__lowercase= {0: 'batch', 2: 'past_sequence + sequence'}
__lowercase= {0: 'batch', 2: 'past_sequence + sequence'}
else:
__lowercase= OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
] )
return common_inputs
@property
def _A (self ):
if self.task in ["default", "seq2seq-lm"]:
__lowercase= super().outputs
else:
__lowercase= super(lowerCAmelCase , self ).outputs
if self.use_past:
__lowercase, __lowercase= self.num_layers
for i in range(lowerCAmelCase ):
__lowercase= {0: 'batch', 2: 'past_sequence + sequence'}
__lowercase= {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def _A (self , lowerCAmelCase , lowerCAmelCase = -1 , lowerCAmelCase = -1 , lowerCAmelCase = False , lowerCAmelCase = None , ):
__lowercase= self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Generate decoder inputs
__lowercase= seq_length if not self.use_past else 1
__lowercase= self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
__lowercase= {f'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
__lowercase= dict(**lowerCAmelCase , **lowerCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
__lowercase, __lowercase= common_inputs['input_ids'].shape
__lowercase= common_inputs['decoder_input_ids'].shape[1]
__lowercase, __lowercase= self.num_attention_heads
__lowercase= (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__lowercase= decoder_seq_length + 3
__lowercase= (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__lowercase= torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(lowerCAmelCase , lowerCAmelCase )] , dim=1 )
__lowercase= []
# If the numbers of encoder and decoder layers are both present in the model configuration, both are considered
__lowercase, __lowercase= self.num_layers
__lowercase= min(lowerCAmelCase , lowerCAmelCase )
__lowercase= max(lowerCAmelCase , lowerCAmelCase ) - min_num_layers
__lowercase= 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(lowerCAmelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowerCAmelCase ),
torch.zeros(lowerCAmelCase ),
torch.zeros(lowerCAmelCase ),
torch.zeros(lowerCAmelCase ),
) )
# TODO: test this.
__lowercase= encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(lowerCAmelCase , lowerCAmelCase ):
common_inputs["past_key_values"].append((torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase )) )
return common_inputs
def _A (self , lowerCAmelCase , lowerCAmelCase = -1 , lowerCAmelCase = -1 , lowerCAmelCase = False , lowerCAmelCase = None , ):
__lowercase= self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
__lowercase, __lowercase= common_inputs['input_ids'].shape
# Not using the same length for past_key_values
__lowercase= seqlen + 2
__lowercase, __lowercase= self.num_layers
__lowercase, __lowercase= self.num_attention_heads
__lowercase= (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__lowercase= common_inputs['attention_mask'].dtype
__lowercase= torch.cat(
[common_inputs['attention_mask'], torch.ones(lowerCAmelCase , lowerCAmelCase , dtype=lowerCAmelCase )] , dim=1 )
__lowercase= [
(torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase )) for _ in range(lowerCAmelCase )
]
return common_inputs
def _A (self , lowerCAmelCase , lowerCAmelCase = -1 , lowerCAmelCase = -1 , lowerCAmelCase = False , lowerCAmelCase = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__lowercase= compute_effective_axis_dimension(
lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__lowercase= tokenizer.num_special_tokens_to_add(lowerCAmelCase )
__lowercase= compute_effective_axis_dimension(
lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCAmelCase )
# Generate dummy inputs according to compute batch and sequence
__lowercase= [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
__lowercase= dict(tokenizer(lowerCAmelCase , return_tensors=lowerCAmelCase ) )
return common_inputs
def _A (self , lowerCAmelCase , lowerCAmelCase = -1 , lowerCAmelCase = -1 , lowerCAmelCase = False , lowerCAmelCase = None , ):
if self.task in ["default", "seq2seq-lm"]:
__lowercase= self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase )
elif self.task == "causal-lm":
__lowercase= self._generate_dummy_inputs_for_causal_lm(
lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase )
else:
__lowercase= self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase )
return common_inputs
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
if self.task in ["default", "seq2seq-lm"]:
__lowercase= super()._flatten_past_key_values_(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
else:
__lowercase= super(lowerCAmelCase , self )._flatten_past_key_values_(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
| 304
| 1
|
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class A :
def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=9_9 , lowerCAmelCase=[1, 1, 2] , lowerCAmelCase=1 , lowerCAmelCase=3_2 , lowerCAmelCase=4 , lowerCAmelCase=8 , lowerCAmelCase=3_7 , lowerCAmelCase="gelu_new" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=0.0 , lowerCAmelCase=5_1_2 , lowerCAmelCase=3 , lowerCAmelCase=0.02 , lowerCAmelCase=3 , lowerCAmelCase=4 , lowerCAmelCase=None , lowerCAmelCase=False , ):
__lowercase= parent
__lowercase= batch_size
__lowercase= seq_length
__lowercase= is_training
__lowercase= use_input_mask
__lowercase= use_token_type_ids
__lowercase= use_labels
__lowercase= vocab_size
__lowercase= block_sizes
__lowercase= num_decoder_layers
__lowercase= d_model
__lowercase= n_head
__lowercase= d_head
__lowercase= d_inner
__lowercase= hidden_act
__lowercase= hidden_dropout
__lowercase= attention_dropout
__lowercase= activation_dropout
__lowercase= max_position_embeddings
__lowercase= type_vocab_size
__lowercase= 2
__lowercase= num_labels
__lowercase= num_choices
__lowercase= scope
__lowercase= initializer_std
# Used in the tests to check the size of the first attention layer
__lowercase= n_head
# Used in the tests to check the size of the first hidden state
__lowercase= self.d_model
# Used in the tests to check the number of output hidden states/attentions
__lowercase= sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
__lowercase= self.num_hidden_layers + 2
def _A (self ):
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase= None
if self.use_input_mask:
__lowercase= random_attention_mask([self.batch_size, self.seq_length] )
__lowercase= None
if self.use_token_type_ids:
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase= None
__lowercase= None
__lowercase= None
if self.use_labels:
__lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase= ids_tensor([self.batch_size] , self.num_choices )
__lowercase= FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= TFFunnelModel(config=lowerCAmelCase )
__lowercase= {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__lowercase= model(lowerCAmelCase )
__lowercase= [input_ids, input_mask]
__lowercase= model(lowerCAmelCase )
__lowercase= model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
__lowercase= False
__lowercase= TFFunnelModel(config=lowerCAmelCase )
__lowercase= model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
__lowercase= False
__lowercase= TFFunnelModel(config=lowerCAmelCase )
__lowercase= model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= TFFunnelBaseModel(config=lowerCAmelCase )
__lowercase= {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__lowercase= model(lowerCAmelCase )
__lowercase= [input_ids, input_mask]
__lowercase= model(lowerCAmelCase )
__lowercase= model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
__lowercase= False
__lowercase= TFFunnelBaseModel(config=lowerCAmelCase )
__lowercase= model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
__lowercase= False
__lowercase= TFFunnelBaseModel(config=lowerCAmelCase )
__lowercase= model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= TFFunnelForPreTraining(config=lowerCAmelCase )
__lowercase= {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__lowercase= model(lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= TFFunnelForMaskedLM(config=lowerCAmelCase )
__lowercase= {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__lowercase= model(lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= self.num_labels
__lowercase= TFFunnelForSequenceClassification(config=lowerCAmelCase )
__lowercase= {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__lowercase= model(lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= self.num_choices
__lowercase= TFFunnelForMultipleChoice(config=lowerCAmelCase )
__lowercase= tf.tile(tf.expand_dims(lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
__lowercase= tf.tile(tf.expand_dims(lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
__lowercase= tf.tile(tf.expand_dims(lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
__lowercase= {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__lowercase= model(lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= self.num_labels
__lowercase= TFFunnelForTokenClassification(config=lowerCAmelCase )
__lowercase= {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__lowercase= model(lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= TFFunnelForQuestionAnswering(config=lowerCAmelCase )
__lowercase= {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__lowercase= model(lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _A (self ):
__lowercase= self.prepare_config_and_inputs()
__lowercase, __lowercase, __lowercase, __lowercase, __lowercase, __lowercase, __lowercase= config_and_inputs
__lowercase= {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class A ( A_ , A_ , unittest.TestCase ):
UpperCamelCase_ : List[Any] =(
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
UpperCamelCase_ : Union[str, Any] =(
{
'''feature-extraction''': (TFFunnelBaseModel, TFFunnelModel),
'''fill-mask''': TFFunnelForMaskedLM,
'''question-answering''': TFFunnelForQuestionAnswering,
'''text-classification''': TFFunnelForSequenceClassification,
'''token-classification''': TFFunnelForTokenClassification,
'''zero-shot''': TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCamelCase_ : int =False
UpperCamelCase_ : int =False
def _A (self ):
__lowercase= TFFunnelModelTester(self )
__lowercase= ConfigTester(self , config_class=lowerCAmelCase )
def _A (self ):
self.config_tester.run_common_tests()
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase )
@require_tf
class A ( A_ , unittest.TestCase ):
UpperCamelCase_ : Union[str, Any] =(
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
UpperCamelCase_ : Any =False
UpperCamelCase_ : List[str] =False
def _A (self ):
__lowercase= TFFunnelModelTester(self , base=lowerCAmelCase )
__lowercase= ConfigTester(self , config_class=lowerCAmelCase )
def _A (self ):
self.config_tester.run_common_tests()
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowerCAmelCase )
| 304
|
from math import factorial, radians
def _lowerCamelCase( lowercase__ , lowercase__ = 1_8 , lowercase__ = 1_0 ) -> float:
'''simple docstring'''
__lowercase= angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
# Converting from degrees to radians
__lowercase= radians(lowercase__ )
__lowercase= angle_in_radians
__lowercase= 3
__lowercase= -1
for _ in range(lowercase__ ):
result += (b * (angle_in_radians**a)) / factorial(lowercase__ )
__lowercase= -b # One positive term and the next will be negative and so on...
a += 2 # Increased by 2 for every term.
return round(lowercase__ , lowercase__ )
if __name__ == "__main__":
__import__('''doctest''').testmod()
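# --- Added check (not part of the original file) ---
# The loop accumulates the Maclaurin series sin(x) = x - x**3/3! + x**5/5! - ...
# (x in radians) after reducing the angle mod 360 degrees. With the default
# 18 terms it matches the standard library well past 10 decimals:
#     from math import sin, radians
#     sin(radians(40)) -> 0.6427876096865393, series (rounded) -> 0.6427876097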
| 304
| 1
|
def _lowerCamelCase( lowercase__ = 1_0_0_0 ) -> int:
'''simple docstring'''
return sum(e for e in range(3 , lowercase__ ) if e % 3 == 0 or e % 5 == 0 )
if __name__ == "__main__":
print(F'{solution() = }')
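# --- Added note (not part of the original file): the same total has a closed
# form by inclusion-exclusion; `_sum_multiples_below` is a hypothetical helper.
def _sum_multiples_below(k: int, limit: int) -> int:
    n = (limit - 1) // k  # positive multiples of k strictly below limit
    return k * n * (n + 1) // 2

# multiples of 15 are counted under both 3 and 5, so subtract them once
assert _sum_multiples_below(3, 1_0_0_0) + _sum_multiples_below(5, 1_0_0_0) - _sum_multiples_below(15, 1_0_0_0) == 2_3_3_1_6_8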
| 304
|
lowerCAmelCase = [
9_9_9,
8_0_0,
7_9_9,
6_0_0,
5_9_9,
5_0_0,
4_0_0,
3_9_9,
3_7_7,
3_5_5,
3_3_3,
3_1_1,
2_8_8,
2_6_6,
2_4_4,
2_2_2,
2_0_0,
1_9_9,
1_7_7,
1_5_5,
1_3_3,
1_1_1,
8_8,
6_6,
4_4,
2_2,
0,
]
lowerCAmelCase = [
9_9_9,
9_7_6,
9_5_2,
9_2_8,
9_0_5,
8_8_2,
8_5_8,
8_5_7,
8_1_0,
7_6_2,
7_1_5,
7_1_4,
5_7_2,
4_2_9,
4_2_8,
2_8_6,
2_8_5,
2_3_8,
1_9_0,
1_4_3,
1_4_2,
1_1_8,
9_5,
7_1,
4_7,
2_4,
0,
]
lowerCAmelCase = [
9_9_9,
9_8_8,
9_7_7,
9_6_6,
9_5_5,
9_4_4,
9_3_3,
9_2_2,
9_1_1,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_5_0,
3_0_0,
2_9_9,
2_6_6,
2_3_3,
2_0_0,
1_9_9,
1_7_9,
1_5_9,
1_4_0,
1_2_0,
1_0_0,
9_9,
8_8,
7_7,
6_6,
5_5,
4_4,
3_3,
2_2,
1_1,
0,
]
lowerCAmelCase = [
9_9_9,
9_9_5,
9_9_2,
9_8_9,
9_8_5,
9_8_1,
9_7_8,
9_7_5,
9_7_1,
9_6_7,
9_6_4,
9_6_1,
9_5_7,
9_5_6,
9_5_1,
9_4_7,
9_4_2,
9_3_7,
9_3_3,
9_2_8,
9_2_3,
9_1_9,
9_1_4,
9_1_3,
9_0_8,
9_0_3,
8_9_7,
8_9_2,
8_8_7,
8_8_1,
8_7_6,
8_7_1,
8_7_0,
8_6_4,
8_5_8,
8_5_2,
8_4_6,
8_4_0,
8_3_4,
8_2_8,
8_2_7,
8_2_0,
8_1_3,
8_0_6,
7_9_9,
7_9_2,
7_8_5,
7_8_4,
7_7_7,
7_7_0,
7_6_3,
7_5_6,
7_4_9,
7_4_2,
7_4_1,
7_3_3,
7_2_4,
7_1_6,
7_0_7,
6_9_9,
6_9_8,
6_8_8,
6_7_7,
6_6_6,
6_5_6,
6_5_5,
6_4_5,
6_3_4,
6_2_3,
6_1_3,
6_1_2,
5_9_8,
5_8_4,
5_7_0,
5_6_9,
5_5_5,
5_4_1,
5_2_7,
5_2_6,
5_0_5,
4_8_4,
4_8_3,
4_6_2,
4_4_0,
4_3_9,
3_9_6,
3_9_5,
3_5_2,
3_5_1,
3_0_8,
3_0_7,
2_6_4,
2_6_3,
2_2_0,
2_1_9,
1_7_6,
1_3_2,
8_8,
4_4,
0,
]
lowerCAmelCase = [
9_9_9,
9_9_7,
9_9_5,
9_9_2,
9_9_0,
9_8_8,
9_8_6,
9_8_4,
9_8_1,
9_7_9,
9_7_7,
9_7_5,
9_7_2,
9_7_0,
9_6_8,
9_6_6,
9_6_4,
9_6_1,
9_5_9,
9_5_7,
9_5_6,
9_5_4,
9_5_1,
9_4_9,
9_4_6,
9_4_4,
9_4_1,
9_3_9,
9_3_6,
9_3_4,
9_3_1,
9_2_9,
9_2_6,
9_2_4,
9_2_1,
9_1_9,
9_1_6,
9_1_4,
9_1_3,
9_1_0,
9_0_7,
9_0_5,
9_0_2,
8_9_9,
8_9_6,
8_9_3,
8_9_1,
8_8_8,
8_8_5,
8_8_2,
8_7_9,
8_7_7,
8_7_4,
8_7_1,
8_7_0,
8_6_7,
8_6_4,
8_6_1,
8_5_8,
8_5_5,
8_5_2,
8_4_9,
8_4_6,
8_4_3,
8_4_0,
8_3_7,
8_3_4,
8_3_1,
8_2_8,
8_2_7,
8_2_4,
8_2_1,
8_1_7,
8_1_4,
8_1_1,
8_0_8,
8_0_4,
8_0_1,
7_9_8,
7_9_5,
7_9_1,
7_8_8,
7_8_5,
7_8_4,
7_8_0,
7_7_7,
7_7_4,
7_7_0,
7_6_6,
7_6_3,
7_6_0,
7_5_6,
7_5_2,
7_4_9,
7_4_6,
7_4_2,
7_4_1,
7_3_7,
7_3_3,
7_3_0,
7_2_6,
7_2_2,
7_1_8,
7_1_4,
7_1_0,
7_0_7,
7_0_3,
6_9_9,
6_9_8,
6_9_4,
6_9_0,
6_8_5,
6_8_1,
6_7_7,
6_7_3,
6_6_9,
6_6_4,
6_6_0,
6_5_6,
6_5_5,
6_5_0,
6_4_6,
6_4_1,
6_3_6,
6_3_2,
6_2_7,
6_2_2,
6_1_8,
6_1_3,
6_1_2,
6_0_7,
6_0_2,
5_9_6,
5_9_1,
5_8_6,
5_8_0,
5_7_5,
5_7_0,
5_6_9,
5_6_3,
5_5_7,
5_5_1,
5_4_5,
5_3_9,
5_3_3,
5_2_7,
5_2_6,
5_1_9,
5_1_2,
5_0_5,
4_9_8,
4_9_1,
4_8_4,
4_8_3,
4_7_4,
4_6_6,
4_5_7,
4_4_9,
4_4_0,
4_3_9,
4_2_8,
4_1_8,
4_0_7,
3_9_6,
3_9_5,
3_8_1,
3_6_6,
3_5_2,
3_5_1,
3_3_0,
3_0_8,
3_0_7,
2_8_6,
2_6_4,
2_6_3,
2_4_2,
2_2_0,
2_1_9,
1_7_6,
1_7_5,
1_3_2,
1_3_1,
8_8,
4_4,
0,
]
lowerCAmelCase = [
9_9_9,
9_9_1,
9_8_2,
9_7_4,
9_6_6,
9_5_8,
9_5_0,
9_4_1,
9_3_3,
9_2_5,
9_1_6,
9_0_8,
9_0_0,
8_9_9,
8_7_4,
8_5_0,
8_2_5,
8_0_0,
7_9_9,
7_0_0,
6_0_0,
5_0_0,
4_0_0,
3_0_0,
2_0_0,
1_0_0,
0,
]
lowerCAmelCase = [
9_9_9,
9_9_2,
9_8_5,
9_7_8,
9_7_1,
9_6_4,
9_5_7,
9_4_9,
9_4_2,
9_3_5,
9_2_8,
9_2_1,
9_1_4,
9_0_7,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_0_0,
2_9_9,
2_0_0,
1_9_9,
1_0_0,
9_9,
0,
]
lowerCAmelCase = [
9_9_9,
9_9_6,
9_9_2,
9_8_9,
9_8_5,
9_8_2,
9_7_9,
9_7_5,
9_7_2,
9_6_8,
9_6_5,
9_6_1,
9_5_8,
9_5_5,
9_5_1,
9_4_8,
9_4_4,
9_4_1,
9_3_8,
9_3_4,
9_3_1,
9_2_7,
9_2_4,
9_2_0,
9_1_7,
9_1_4,
9_1_0,
9_0_7,
9_0_3,
9_0_0,
8_9_9,
8_9_1,
8_8_4,
8_7_6,
8_6_9,
8_6_1,
8_5_3,
8_4_6,
8_3_8,
8_3_0,
8_2_3,
8_1_5,
8_0_8,
8_0_0,
7_9_9,
7_8_8,
7_7_7,
7_6_6,
7_5_5,
7_4_4,
7_3_3,
7_2_2,
7_1_1,
7_0_0,
6_9_9,
6_8_8,
6_7_7,
6_6_6,
6_5_5,
6_4_4,
6_3_3,
6_2_2,
6_1_1,
6_0_0,
5_9_9,
5_8_5,
5_7_1,
5_5_7,
5_4_2,
5_2_8,
5_1_4,
5_0_0,
4_9_9,
4_8_5,
4_7_1,
4_5_7,
4_4_2,
4_2_8,
4_1_4,
4_0_0,
3_9_9,
3_7_9,
3_5_9,
3_4_0,
3_2_0,
3_0_0,
2_9_9,
2_7_9,
2_5_9,
2_4_0,
2_2_0,
2_0_0,
1_9_9,
1_6_6,
1_3_3,
1_0_0,
9_9,
6_6,
3_3,
0,
]
| 304
| 1
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''adapter_layer''': '''encoder.layers.*.adapter_layer''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
'''pooling_layer.linear''': '''projector''',
'''pooling_layer.projection''': '''classifier''',
}
lowerCAmelCase = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
'''projector''',
'''classifier''',
]
def _lowerCamelCase( lowercase__ ) -> str:
'''simple docstring'''
__lowercase= {}
with open(lowercase__ , 'r' ) as file:
for line_number, line in enumerate(lowercase__ ):
__lowercase= line.strip()
if line:
__lowercase= line.split()
__lowercase= line_number
__lowercase= words[0]
__lowercase= value
return result
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> int:
'''simple docstring'''
for attribute in key.split('.' ):
__lowercase= getattr(lowercase__ , lowercase__ )
__lowercase= None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(lowercase__ ):
__lowercase= PARAM_MAPPING[full_name.split('.' )[-1]]
__lowercase= 'param'
if weight_type is not None and weight_type != "param":
__lowercase= getattr(lowercase__ , lowercase__ ).shape
elif weight_type is not None and weight_type == "param":
__lowercase= hf_pointer
for attribute in hf_param_name.split('.' ):
__lowercase= getattr(lowercase__ , lowercase__ )
__lowercase= shape_pointer.shape
# let's reduce dimension
__lowercase= value[0]
else:
__lowercase= hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}' )
if weight_type == "weight":
__lowercase= value
elif weight_type == "weight_g":
__lowercase= value
elif weight_type == "weight_v":
__lowercase= value
elif weight_type == "bias":
__lowercase= value
elif weight_type == "param":
for attribute in hf_param_name.split('.' ):
__lowercase= getattr(lowercase__ , lowercase__ )
__lowercase= value
else:
__lowercase= value
logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> List[str]:
'''simple docstring'''
__lowercase= None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(lowercase__ ):
__lowercase= PARAM_MAPPING[full_name.split('.' )[-1]]
__lowercase= 'param'
if weight_type is not None and weight_type != "param":
__lowercase= '.'.join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
__lowercase= '.'.join([key, hf_param_name] )
else:
__lowercase= key
__lowercase= value if 'lm_head' in full_key else value[0]
lowerCAmelCase = {
'''W_a''': '''linear_1.weight''',
'''W_b''': '''linear_2.weight''',
'''b_a''': '''linear_1.bias''',
'''b_b''': '''linear_2.bias''',
'''ln_W''': '''norm.weight''',
'''ln_b''': '''norm.bias''',
}
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__=None , lowercase__=None ) -> List[str]:
'''simple docstring'''
__lowercase= False
for key, mapped_key in MAPPING.items():
__lowercase= 'wav2vec2.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
__lowercase= True
if "*" in mapped_key:
__lowercase= name.split(lowercase__ )[0].split('.' )[-2]
__lowercase= mapped_key.replace('*' , lowercase__ )
if "weight_g" in name:
__lowercase= 'weight_g'
elif "weight_v" in name:
__lowercase= 'weight_v'
elif "bias" in name:
__lowercase= 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__lowercase= 'weight'
else:
__lowercase= None
if hf_dict is not None:
rename_dict(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
else:
set_recursively(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
return is_used
return is_used
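# --- Added illustration (not part of the original conversion script) ---
# Example of the '*' substitution above: a fairseq weight named
#     encoder.layers.3.self_attn.k_proj.weight
# matches MAPPING['self_attn.k_proj']; the layer index '3' is extracted from
# the name and spliced into the template, yielding the Hugging Face key
#     wav2vec2.encoder.layers.3.attention.k_proj.weight
# with weight_type 'weight'.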
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ ) -> Optional[Any]:
'''simple docstring'''
__lowercase= []
__lowercase= fairseq_model.state_dict()
__lowercase= hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
__lowercase= False
if "conv_layers" in name:
load_conv_layer(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , hf_model.config.feat_extract_norm == 'group' , )
__lowercase= True
else:
__lowercase= load_wavaveca_layer(lowercase__ , lowercase__ , lowercase__ )
if not is_used:
unused_weights.append(lowercase__ )
logger.warning(F'Unused weights: {unused_weights}' )
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> None:
    '''Load a single feature-extractor conv-layer tensor (the conv weight/bias or its norm).'''
__lowercase= full_name.split('conv_layers.' )[-1]
__lowercase= name.split('.' )
__lowercase= int(items[0] )
__lowercase= int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
__lowercase= value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
__lowercase= value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' )
__lowercase= value
            logger.info(F'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' )
__lowercase= value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(lowercase__ )
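# Name layout handled above (illustrative): "conv_layers.0.0.bias" parses to
# layer_id=0, type_id=0 (the conv itself), while "conv_layers.0.2.weight" parses to
# type_id=2 (its layer norm: only layer 0 when group norm is used, every layer otherwise).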
@torch.no_grad()
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__=None , lowercase__=None , lowercase__=True , lowercase__=False ) -> None:
    '''Convert a fairseq wav2vec2 checkpoint to the Hugging Face format and save it to disk.'''
if config_path is not None:
__lowercase= WavaVecaConfig.from_pretrained(lowercase__ )
else:
__lowercase= WavaVecaConfig()
if is_seq_class:
__lowercase= read_txt_into_dict(lowercase__ )
__lowercase= idalabel
__lowercase= WavaVecaForSequenceClassification(lowercase__ )
__lowercase= WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=lowercase__ , return_attention_mask=lowercase__ , )
feature_extractor.save_pretrained(lowercase__ )
elif is_finetuned:
if dict_path:
__lowercase= Dictionary.load(lowercase__ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__lowercase= target_dict.pad_index
__lowercase= target_dict.bos_index
__lowercase= target_dict.eos_index
__lowercase= len(target_dict.symbols )
__lowercase= os.path.join(lowercase__ , 'vocab.json' )
if not os.path.isdir(lowercase__ ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(lowercase__ ) )
return
os.makedirs(lowercase__ , exist_ok=lowercase__ )
__lowercase= target_dict.indices
# fairseq has the <pad> and <s> switched
__lowercase= 0
__lowercase= 1
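            # After the swap (illustrative): vocab['<pad>'] == 0 (reused as the CTC blank)
            # and vocab['<s>'] == 1, the reverse of fairseq's default ordering.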
with open(lowercase__ , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(lowercase__ , lowercase__ )
__lowercase= WavaVecaCTCTokenizer(
lowercase__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=lowercase__ , )
        __lowercase= config.feat_extract_norm == 'layer'
__lowercase= WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=lowercase__ , return_attention_mask=lowercase__ , )
__lowercase= WavaVecaProcessor(feature_extractor=lowercase__ , tokenizer=lowercase__ )
processor.save_pretrained(lowercase__ )
__lowercase= WavaVecaForCTC(lowercase__ )
else:
__lowercase= WavaVecaForPreTraining(lowercase__ )
if is_finetuned or is_seq_class:
__lowercase, __lowercase, __lowercase= fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
__lowercase= argparse.Namespace(task='audio_pretraining' )
__lowercase= fairseq.tasks.setup_task(lowercase__ )
__lowercase, __lowercase, __lowercase= fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowercase__ )
__lowercase= model[0].eval()
recursively_load_weights(lowercase__ , lowercase__ , not is_finetuned )
hf_wavavec.save_pretrained(lowercase__ )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
parser.add_argument(
'''--is_seq_class''',
action='''store_true''',
help='''Whether the model to convert is a fine-tuned sequence classification model or not''',
)
lowerCAmelCase = parser.parse_args()
lowerCAmelCase = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 304
|
from __future__ import annotations
import numpy as np
def _lowerCamelCase( lowercase__ ) -> np.ndarray:
    '''Apply the ReLU activation element-wise: relu(x) = max(0, x).'''
return np.maximum(0 , lowercase__ )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 304
| 1
|
def _lowerCamelCase( lowercase__ , lowercase__ ) -> int | None:
    '''Iterative interpolation search over an ascending sorted collection; return the index of item, or None.'''
__lowercase= 0
__lowercase= len(lowercase__ ) - 1
while left <= right:
# avoid divided by 0 during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
__lowercase= left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(lowercase__ ):
return None
__lowercase= sorted_collection[point]
if current_item == item:
return point
else:
if point < left:
__lowercase= left
__lowercase= point
elif point > right:
__lowercase= right
__lowercase= point
else:
if item < current_item:
__lowercase= point - 1
else:
__lowercase= point + 1
return None
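# Worked example of the probe formula above (illustrative): for
# sorted_collection = [10, 30, 40, 45], item = 45, left = 0, right = 3 the probe is
#     point = 0 + ((45 - 10) * (3 - 0)) // (45 - 10) = 3,
# landing directly on the answer because the values are roughly uniformly spaced.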
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> int | None:
    '''Recursive interpolation search over sorted_collection[left:right + 1]; return the index of item, or None.'''
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
__lowercase= left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(lowercase__ ):
return None
if sorted_collection[point] == item:
return point
elif point < left:
return interpolation_search_by_recursion(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
elif point > right:
return interpolation_search_by_recursion(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
else:
if sorted_collection[point] > item:
return interpolation_search_by_recursion(
lowercase__ , lowercase__ , lowercase__ , point - 1 )
else:
return interpolation_search_by_recursion(
lowercase__ , lowercase__ , point + 1 , lowercase__ )
def _lowerCamelCase( lowercase__ ) -> bool:
    '''Raise ValueError unless the collection is ascending sorted; return True otherwise.'''
if collection != sorted(lowercase__ ):
raise ValueError('Collection must be ascending sorted' )
return True
if __name__ == "__main__":
import sys
    lowerCAmelCase = [1_0, 3_0, 4_0, 4_5, 5_0, 6_6, 7_7, 9_3]
    try:
        __assert_sorted(collection)
    except ValueError:
        sys.exit('''Sequence must be ascending sorted to apply interpolation search''')
lowerCAmelCase = 6_7
lowerCAmelCase = interpolation_search(collection, target)
if result is not None:
print(F'{target} found at positions: {result}')
else:
print('''Not found''')
| 304
|
def _lowerCamelCase( lowercase__ = 1_0_0_0 ) -> int:
    '''Return the sum of the decimal digits of 2**power (Project Euler problem 16).'''
__lowercase= 2**power
__lowercase= str(lowercase__ )
__lowercase= list(lowercase__ )
__lowercase= 0
for i in list_num:
sum_of_num += int(lowercase__ )
return sum_of_num
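# Worked example (illustrative): solution(1_5) computes 2**15 = 32768 and returns
# 3 + 2 + 7 + 6 + 8 = 26.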
if __name__ == "__main__":
lowerCAmelCase = int(input('''Enter the power of 2: ''').strip())
print('''2 ^ ''', power, ''' = ''', 2**power)
lowerCAmelCase = solution(power)
print('''Sum of the digits is: ''', result)
| 304
| 1
|
from functools import reduce
lowerCAmelCase = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def _lowerCamelCase( lowercase__ = N ) -> int:
    '''Return the greatest product of thirteen adjacent digits of the 1000-digit number N (Project Euler problem 8).'''
return max(
# mypy cannot properly interpret reduce
        int(reduce(lambda x , y : str(int(x ) * int(y ) ) , n[i : i + 1_3] ) )
for i in range(len(lowercase__ ) - 1_2 ) )
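# Worked example (illustrative): for i = 0 the window n[0:13] is "7316717653133" and
# the reduce multiplies its digits one by one; solution() returns the maximum such
# product over every 13-digit window of N.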
if __name__ == "__main__":
print(F'{solution() = }')
| 304
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , ) -> tuple:
    '''Build (train_ds, val_ds, test_ds, label2id) tf.data datasets from the given CSV files.'''
__lowercase= {}
if train_file is not None:
__lowercase= [train_file]
if eval_file is not None:
__lowercase= [eval_file]
if test_file is not None:
__lowercase= [test_file]
__lowercase= datasets.load_dataset('csv' , data_files=lowercase__ )
__lowercase= list(ds[list(files.keys() )[0]].features.keys() )
__lowercase= features_name.pop(lowercase__ )
__lowercase= list(set(ds[list(files.keys() )[0]][label_name] ) )
__lowercase= {label: i for i, label in enumerate(lowercase__ )}
__lowercase= tokenizer.model_input_names
__lowercase= {}
if len(lowercase__ ) == 1:
for k in files.keys():
__lowercase= ds[k].map(
lambda lowercase__ : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=lowercase__ , max_length=lowercase__ , padding='max_length' ) , batched=lowercase__ , )
elif len(lowercase__ ) == 2:
for k in files.keys():
__lowercase= ds[k].map(
lambda lowercase__ : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=lowercase__ , max_length=lowercase__ , padding='max_length' , ) , batched=lowercase__ , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
__lowercase= {k: v for k, v in ex.items() if k in input_names}
__lowercase= labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
__lowercase= {k: v for k, v in ex.items() if k in input_names}
__lowercase= labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
__lowercase= {k: v for k, v in ex.items() if k in input_names}
__lowercase= labelaid[ex[label_name]]
yield (d, label)
__lowercase= (
tf.data.Dataset.from_generator(
lowercase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
__lowercase= train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
__lowercase= (
tf.data.Dataset.from_generator(
lowercase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
__lowercase= val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
__lowercase= (
tf.data.Dataset.from_generator(
lowercase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
__lowercase= test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
lowerCAmelCase = logging.getLogger(__name__)
@dataclass
class A :
UpperCamelCase_ : int =field(metadata={'''help''': '''Which column contains the label'''} )
UpperCamelCase_ : str =field(default=A_ , metadata={'''help''': '''The path of the training file'''} )
UpperCamelCase_ : Optional[str] =field(default=A_ , metadata={'''help''': '''The path of the development file'''} )
UpperCamelCase_ : Optional[str] =field(default=A_ , metadata={'''help''': '''The path of the test file'''} )
UpperCamelCase_ : int =field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
UpperCamelCase_ : bool =field(
default=A_ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
@dataclass
class A :
UpperCamelCase_ : str =field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
UpperCamelCase_ : Optional[str] =field(
default=A_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
UpperCamelCase_ : Optional[str] =field(
default=A_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
UpperCamelCase_ : bool =field(default=A_ , metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
UpperCamelCase_ : Optional[str] =field(
default=A_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
def _lowerCamelCase( ) -> dict:
    '''Fine-tune and evaluate a TF sequence-classification model; return the eval results.'''
__lowercase= HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
__lowercase, __lowercase, __lowercase= parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
logger.info(
F'n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '
F'16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__lowercase= AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
__lowercase, __lowercase, __lowercase, __lowercase= get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=lowercase__ , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
__lowercase= AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(lowercase__ ) , labelaid=lowercase__ , idalabel={id: label for label, id in labelaid.items()} , finetuning_task='text-classification' , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
__lowercase= TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool('.bin' in model_args.model_name_or_path ) , config=lowercase__ , cache_dir=model_args.cache_dir , )
def compute_metrics(lowercase__ ) -> Dict:
__lowercase= np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
__lowercase= TFTrainer(
model=lowercase__ , args=lowercase__ , train_dataset=lowercase__ , eval_dataset=lowercase__ , compute_metrics=lowercase__ , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__lowercase= {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
__lowercase= trainer.evaluate()
__lowercase= os.path.join(training_args.output_dir , 'eval_results.txt' )
with open(lowercase__ , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(F' {key} = {value}' )
writer.write(F'{key} = {value}\n' )
results.update(lowercase__ )
return results
if __name__ == "__main__":
main()
| 304
| 1
|
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class A ( A_ ):
def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=9_9 , lowerCAmelCase=3_2 , lowerCAmelCase=5 , lowerCAmelCase=4 , lowerCAmelCase=3_7 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_1_2 , lowerCAmelCase=1_6 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=False , lowerCAmelCase=True , lowerCAmelCase="None" , lowerCAmelCase=3 , lowerCAmelCase=4 , lowerCAmelCase=None , ):
__lowercase= parent
__lowercase= batch_size
__lowercase= seq_length
__lowercase= is_training
__lowercase= use_input_mask
__lowercase= use_token_type_ids
__lowercase= use_labels
__lowercase= vocab_size
__lowercase= hidden_size
__lowercase= num_hidden_layers
__lowercase= num_attention_heads
__lowercase= intermediate_size
__lowercase= hidden_act
__lowercase= hidden_dropout_prob
__lowercase= attention_probs_dropout_prob
__lowercase= max_position_embeddings
__lowercase= type_vocab_size
__lowercase= type_sequence_label_size
__lowercase= initializer_range
__lowercase= num_labels
__lowercase= num_choices
__lowercase= relative_attention
__lowercase= position_biased_input
__lowercase= pos_att_type
__lowercase= scope
def _A (self ):
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase= None
if self.use_input_mask:
__lowercase= ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
__lowercase= None
if self.use_token_type_ids:
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase= None
__lowercase= None
__lowercase= None
if self.use_labels:
__lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase= ids_tensor([self.batch_size] , self.num_choices )
__lowercase= self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _A (self ):
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def _A (self , lowerCAmelCase ):
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= DebertaVaModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase )[0]
__lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase )[0]
__lowercase= model(lowerCAmelCase )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= DebertaVaForMaskedLM(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= self.num_labels
__lowercase= DebertaVaForSequenceClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= self.num_labels
__lowercase= DebertaVaForTokenClassification(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= DebertaVaForQuestionAnswering(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(
lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= DebertaVaForMultipleChoice(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase= token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase= input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase= model(
lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _A (self ):
__lowercase= self.prepare_config_and_inputs()
        __lowercase, __lowercase, __lowercase, __lowercase, __lowercase, __lowercase, __lowercase= config_and_inputs
__lowercase= {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A ( A_ , A_ , unittest.TestCase ):
UpperCamelCase_ : List[Any] =(
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
UpperCamelCase_ : Union[str, Any] =(
{
'''feature-extraction''': DebertaVaModel,
'''fill-mask''': DebertaVaForMaskedLM,
'''question-answering''': DebertaVaForQuestionAnswering,
'''text-classification''': DebertaVaForSequenceClassification,
'''token-classification''': DebertaVaForTokenClassification,
'''zero-shot''': DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase_ : Union[str, Any] =True
UpperCamelCase_ : int =False
UpperCamelCase_ : List[Any] =False
UpperCamelCase_ : Dict =False
UpperCamelCase_ : List[str] =False
def _A (self ):
__lowercase= DebertaVaModelTester(self )
__lowercase= ConfigTester(self , config_class=lowerCAmelCase , hidden_size=3_7 )
def _A (self ):
self.config_tester.run_common_tests()
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*lowerCAmelCase )
@slow
def _A (self ):
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase= DebertaVaModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class A ( unittest.TestCase ):
@unittest.skip(reason='Model not available yet' )
def _A (self ):
pass
@slow
def _A (self ):
__lowercase= DebertaVaModel.from_pretrained('microsoft/deberta-v2-xlarge' )
__lowercase= torch.tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
__lowercase= torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase )[0]
# compare the actual values for a slice.
__lowercase= torch.tensor(
[[[0.23_56, 0.19_48, 0.03_69], [-0.10_63, 0.35_86, -0.51_52], [-0.63_99, -0.02_59, -0.25_25]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCAmelCase , atol=1E-4 ) , f'{output[:, 1:4, 1:4]}' )
| 304
|
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A ( A_ ):
def _A (self ):
__lowercase= self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCAmelCase , 'embed_dim' ) )
self.parent.assertTrue(hasattr(lowerCAmelCase , 'num_heads' ) )
class A :
def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=6_4 , lowerCAmelCase=3 , lowerCAmelCase=[1_6, 4_8, 9_6] , lowerCAmelCase=[1, 3, 6] , lowerCAmelCase=[1, 2, 1_0] , lowerCAmelCase=[7, 3, 3] , lowerCAmelCase=[4, 2, 2] , lowerCAmelCase=[2, 1, 1] , lowerCAmelCase=[2, 2, 2] , lowerCAmelCase=[False, False, True] , lowerCAmelCase=[0.0, 0.0, 0.0] , lowerCAmelCase=0.02 , lowerCAmelCase=1E-12 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=2 , ):
__lowercase= parent
__lowercase= batch_size
__lowercase= image_size
__lowercase= patch_sizes
__lowercase= patch_stride
__lowercase= patch_padding
__lowercase= is_training
__lowercase= use_labels
__lowercase= num_labels
__lowercase= num_channels
__lowercase= embed_dim
__lowercase= num_heads
__lowercase= stride_kv
__lowercase= depth
__lowercase= cls_token
__lowercase= attention_drop_rate
__lowercase= initializer_range
__lowercase= layer_norm_eps
def _A (self ):
__lowercase= floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase= None
if self.use_labels:
__lowercase= ids_tensor([self.batch_size] , self.num_labels )
__lowercase= self.get_config()
return config, pixel_values, labels
def _A (self ):
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= CvtModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase )
__lowercase= (self.image_size, self.image_size)
__lowercase, __lowercase= image_size[0], image_size[1]
for i in range(len(self.depth ) ):
__lowercase= floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
__lowercase= floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
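    # Worked example of the conv output-size formula above (illustrative): with
    # image_size=64, patch_size=7, stride=4 and padding=2 the first stage gives
    # floor((64 + 2*2 - 7) / 4 + 1) = floor(16.25) = 16.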
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= self.num_labels
__lowercase= CvtForImageClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _A (self ):
__lowercase= self.prepare_config_and_inputs()
__lowercase, __lowercase, __lowercase= config_and_inputs
__lowercase= {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class A ( A_ , A_ , unittest.TestCase ):
UpperCamelCase_ : Optional[int] =(CvtModel, CvtForImageClassification) if is_torch_available() else ()
UpperCamelCase_ : List[str] =(
{'''feature-extraction''': CvtModel, '''image-classification''': CvtForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase_ : str =False
UpperCamelCase_ : List[Any] =False
UpperCamelCase_ : Any =False
UpperCamelCase_ : Union[str, Any] =False
UpperCamelCase_ : Tuple =False
def _A (self ):
__lowercase= CvtModelTester(self )
__lowercase= ConfigTester(self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase , hidden_size=3_7 )
def _A (self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _A (self ):
return
@unittest.skip(reason='Cvt does not output attentions' )
def _A (self ):
pass
@unittest.skip(reason='Cvt does not use inputs_embeds' )
def _A (self ):
pass
@unittest.skip(reason='Cvt does not support input and output embeddings' )
def _A (self ):
pass
def _A (self ):
__lowercase, __lowercase= self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase= model_class(lowerCAmelCase )
__lowercase= inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase= [*signature.parameters.keys()]
__lowercase= ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def _A (self ):
def check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
with torch.no_grad():
__lowercase= model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
__lowercase= outputs.hidden_states
__lowercase= len(self.model_tester.depth )
self.assertEqual(len(lowerCAmelCase ) , lowerCAmelCase )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
__lowercase, __lowercase= self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase= True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase= True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _A (self ):
pass
@slow
def _A (self ):
for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase= CvtModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
def _lowerCamelCase( ) -> Optional[int]:
    '''Load the COCO test-fixture image used by the vision integration tests.'''
__lowercase= Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class A ( unittest.TestCase ):
@cached_property
def _A (self ):
return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def _A (self ):
__lowercase= CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowerCAmelCase )
__lowercase= self.default_image_processor
__lowercase= prepare_img()
__lowercase= image_processor(images=lowerCAmelCase , return_tensors='pt' ).to(lowerCAmelCase )
# forward pass
with torch.no_grad():
__lowercase= model(**lowerCAmelCase )
# verify the logits
__lowercase= torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase )
__lowercase= torch.tensor([0.92_85, 0.90_15, -0.31_50] ).to(lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1E-4 ) )
| 304
| 1
|
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
lowerCAmelCase = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class A ( datasets.BuilderConfig ):
UpperCamelCase_ : Optional[datasets.Features] =None
def _lowerCamelCase( lowercase__ , lowercase__ , ):
    '''Return a generator function yielding (example_id, row_dict) pairs, partition by partition.'''
import pyspark
def generate_fn():
__lowercase= df.select('*' , pyspark.sql.functions.spark_partition_id().alias('part_id' ) )
for partition_id in partition_order:
__lowercase= df_with_partition_id.select('*' ).where(F'part_id = {partition_id}' ).drop('part_id' )
__lowercase= partition_df.collect()
__lowercase= 0
for row in rows:
yield F'{partition_id}_{row_id}', row.asDict()
row_id += 1
return generate_fn
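# Illustrative output of the generator built above: with two partitions of two rows
# each it yields ("0_0", row), ("0_1", row), ("1_0", row), ("1_1", row), visiting the
# partitions in the order given by partition_order.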
class A ( _BaseExamplesIterable ):
def __init__(self , lowerCAmelCase , lowerCAmelCase=None , ):
__lowercase= df
__lowercase= partition_order or range(self.df.rdd.getNumPartitions() )
__lowercase= _generate_iterable_examples(self.df , self.partition_order )
def __iter__(self ):
yield from self.generate_examples_fn()
def _A (self , lowerCAmelCase ):
__lowercase= list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(lowerCAmelCase )
return SparkExamplesIterable(self.df , partition_order=lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase ):
__lowercase= self.split_shard_indices_by_worker(lowerCAmelCase , lowerCAmelCase )
return SparkExamplesIterable(self.df , partition_order=lowerCAmelCase )
@property
def _A (self ):
return len(self.partition_order )
class A ( datasets.DatasetBuilder ):
UpperCamelCase_ : Optional[Any] =SparkConfig
def __init__(self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , **lowerCAmelCase , ):
import pyspark
__lowercase= pyspark.sql.SparkSession.builder.getOrCreate()
__lowercase= df
__lowercase= working_dir
super().__init__(
cache_dir=lowerCAmelCase , config_name=str(self.df.semanticHash() ) , **lowerCAmelCase , )
def _A (self ):
# Returns the path of the created file.
def create_cache_and_write_probe(lowerCAmelCase ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=lowerCAmelCase )
__lowercase= os.path.join(self._cache_dir , 'fs_test' + uuid.uuida().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
            open(lowerCAmelCase , 'a' ).close()
return [probe_file]
if self._spark.conf.get('spark.master' , '' ).startswith('local' ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
__lowercase= (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(lowerCAmelCase ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
'When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir' )
def _A (self ):
return datasets.DatasetInfo(features=self.config.features )
def _A (self , lowerCAmelCase ):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def _A (self , lowerCAmelCase ):
import pyspark
def get_arrow_batch_size(lowerCAmelCase ):
for batch in it:
yield pa.RecordBatch.from_pydict({'batch_bytes': [batch.nbytes]} )
__lowercase= self.df.count()
__lowercase= df_num_rows if df_num_rows <= 1_0_0 else 1_0_0
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
__lowercase= (
self.df.limit(lowerCAmelCase )
.repartition(1 )
.mapInArrow(lowerCAmelCase , 'batch_bytes: long' )
.agg(pyspark.sql.functions.sum('batch_bytes' ).alias('sample_bytes' ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
__lowercase= approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
__lowercase= min(lowerCAmelCase , int(approx_total_size / max_shard_size ) )
__lowercase= self.df.repartition(lowerCAmelCase )
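    # Worked example (illustrative numbers): at ~2 KB per Arrow row and 10M rows the
    # estimate is ~20 GB; with max_shard_size = 500 MB the dataframe is repartitioned
    # into min(10_000_000, int(approx_total_size / max_shard_size)) ≈ 40 partitions,
    # i.e. roughly one shard per 500 MB.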
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
import pyspark
__lowercase= ParquetWriter if file_format == 'parquet' else ArrowWriter
__lowercase= os.path.join(self._working_dir , os.path.basename(lowerCAmelCase ) ) if self._working_dir else fpath
__lowercase= file_format == 'parquet'
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
__lowercase= self.config.features
__lowercase= self._writer_batch_size
__lowercase= self._fs.storage_options
def write_arrow(lowerCAmelCase ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
__lowercase= pyspark.TaskContext().taskAttemptId()
__lowercase= next(lowerCAmelCase , lowerCAmelCase )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=['task_id', 'num_examples', 'num_bytes'] , )
__lowercase= 0
__lowercase= writer_class(
features=lowerCAmelCase , path=working_fpath.replace('SSSSS' , f'{shard_id:05d}' ).replace('TTTTT' , f'{task_id:05d}' ) , writer_batch_size=lowerCAmelCase , storage_options=lowerCAmelCase , embed_local_files=lowerCAmelCase , )
__lowercase= pa.Table.from_batches([first_batch] )
writer.write_table(lowerCAmelCase )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
__lowercase, __lowercase= writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['task_id', 'num_examples', 'num_bytes'] , )
shard_id += 1
__lowercase= writer_class(
features=writer._features , path=working_fpath.replace('SSSSS' , f'{shard_id:05d}' ).replace('TTTTT' , f'{task_id:05d}' ) , writer_batch_size=lowerCAmelCase , storage_options=lowerCAmelCase , embed_local_files=lowerCAmelCase , )
__lowercase= pa.Table.from_batches([batch] )
writer.write_table(lowerCAmelCase )
if writer._num_bytes > 0:
__lowercase, __lowercase= writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['task_id', 'num_examples', 'num_bytes'] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(lowerCAmelCase ) ):
__lowercase= os.path.join(os.path.dirname(lowerCAmelCase ) , os.path.basename(lowerCAmelCase ) )
shutil.move(lowerCAmelCase , lowerCAmelCase )
__lowercase= (
self.df.mapInArrow(lowerCAmelCase , 'task_id: long, num_examples: long, num_bytes: long' )
.groupBy('task_id' )
.agg(
pyspark.sql.functions.sum('num_examples' ).alias('total_num_examples' ) , pyspark.sql.functions.sum('num_bytes' ).alias('total_num_bytes' ) , pyspark.sql.functions.count('num_bytes' ).alias('num_shards' ) , pyspark.sql.functions.collect_list('num_examples' ).alias('shard_lengths' ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def _A (self , lowerCAmelCase , lowerCAmelCase = "arrow" , lowerCAmelCase = None , lowerCAmelCase = None , **lowerCAmelCase , ):
self._validate_cache_dir()
__lowercase= convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(lowerCAmelCase )
__lowercase= not is_remote_filesystem(self._fs )
__lowercase= os.path.join if is_local else posixpath.join
__lowercase= '-TTTTT-SSSSS-of-NNNNN'
__lowercase= f'{self.name}-{split_generator.name}{SUFFIX}.{file_format}'
__lowercase= path_join(self._output_dir , lowerCAmelCase )
__lowercase= 0
__lowercase= 0
__lowercase= 0
__lowercase= []
__lowercase= []
for task_id, content in self._prepare_split_single(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
            __lowercase, __lowercase, __lowercase, __lowercase= content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(lowerCAmelCase )
__lowercase= total_num_examples
__lowercase= total_num_bytes
# should rename everything at the end
logger.debug(f'Renaming {total_shards} shards.' )
if total_shards > 1:
__lowercase= all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
__lowercase= self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
rename(
lowerCAmelCase , fpath.replace('SSSSS' , f'{shard_id:05d}' ).replace('TTTTT' , f'{task_id:05d}' ) , fpath.replace('TTTTT-SSSSS' , f'{global_shard_id:05d}' ).replace('NNNNN' , f'{total_shards:05d}' ) , )
__lowercase= []
__lowercase= 0
for i in range(len(lowerCAmelCase ) ):
__lowercase, __lowercase= task_id_and_num_shards[i]
for shard_id in range(lowerCAmelCase ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(lowerCAmelCase , len(lowerCAmelCase ) ).map(lambda lowerCAmelCase : _rename_shard(*lowerCAmelCase ) ).collect()
else:
# don't use any pattern
__lowercase= 0
__lowercase= task_id_and_num_shards[0][0]
self._rename(
fpath.replace('SSSSS' , f'{shard_id:05d}' ).replace('TTTTT' , f'{task_id:05d}' ) , fpath.replace(lowerCAmelCase , '' ) , )
def _A (self , lowerCAmelCase , ):
return SparkExamplesIterable(self.df )
| 304
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
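# A minimal sketch of the lazy-import mechanism used above (illustrative; upstream
# assigns the module object into sys.modules):
#     sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
# so `from ...mra import MraModel` only triggers the heavy torch import on first
# attribute access.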
| 304
| 1
|
from __future__ import annotations
import time
import numpy as np
lowerCAmelCase = [8, 5, 9, 7]
lowerCAmelCase = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
lowerCAmelCase = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class A :
def __init__(self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= claim_vector
__lowercase= allocated_resources_table
__lowercase= maximum_claim_table
def _A (self ):
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def _A (self ):
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def _A (self ):
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(__a ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def _A (self ):
return {self.__need().index(__a ): i for i in self.__need()}
def _A (self , **lowerCAmelCase ):
__lowercase= self.__need()
__lowercase= self.__allocated_resources_table
__lowercase= self.__available_resources()
__lowercase= self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print('_' * 5_0 + '\n' )
while need_list:
__lowercase= False
for each_need in need_list:
__lowercase= True
for index, need in enumerate(__a ):
if need > available_resources[index]:
__lowercase= False
break
if execution:
__lowercase= True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
__lowercase= original_need_index
print(f'Process {process_number + 1} is executing.' )
# remove the process run from stack
need_list.remove(__a )
# update available/freed resources stack
__lowercase= np.array(__a ) + np.array(
alloc_resources_table[process_number] )
print(
'Updated available resource stack for processes: '
+ ' '.join([str(__a ) for x in available_resources] ) )
break
if safe:
print('The process is in a safe state.\n' )
else:
print('System in unsafe state. Aborting...\n' )
break
def _A (self ):
print(' ' * 9 + 'Allocated Resource Table' )
for item in self.__allocated_resources_table:
print(
f'P{self.__allocated_resources_table.index(__a ) + 1}'
+ ' '.join(f'{it:>8}' for it in item )
+ '\n' )
print(' ' * 9 + 'System Resource Table' )
for item in self.__maximum_claim_table:
print(
f'P{self.__maximum_claim_table.index(__a ) + 1}'
+ ' '.join(f'{it:>8}' for it in item )
+ '\n' )
print(
'Current Usage by Active Processes: '
+ ' '.join(str(__a ) for x in self.__claim_vector ) )
print(
'Initial Available Resources: '
+ ' '.join(str(__a ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 350
|
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
lowerCAmelCase = '''<<<<<<< This should probably be modified because it mentions: '''
lowerCAmelCase = '''=======
>>>>>>>
'''
lowerCAmelCase = [
'''TextEncoderConfig''',
'''ByteTextEncoder''',
'''SubwordTextEncoder''',
'''encoder_config''',
'''maybe_build_from_corpus''',
'''manual_dir''',
]
lowerCAmelCase = [
# (pattern, replacement)
# Order is important here for some replacements
(R'''tfds\.core''', R'''datasets'''),
(R'''tf\.io\.gfile\.GFile''', R'''open'''),
(R'''tf\.([\w\d]+)''', R'''datasets.Value(\'\1\')'''),
(R'''tfds\.features\.Text\(\)''', R'''datasets.Value(\'string\')'''),
(R'''tfds\.features\.Text\(''', R'''datasets.Value(\'string\'),'''),
(R'''features\s*=\s*tfds.features.FeaturesDict\(''', R'''features=datasets.Features('''),
(R'''tfds\.features\.FeaturesDict\(''', R'''dict('''),
(R'''The TensorFlow Datasets Authors''', R'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''),
(R'''tfds\.''', R'''datasets.'''),
(R'''dl_manager\.manual_dir''', R'''self.config.data_dir'''),
(R'''self\.builder_config''', R'''self.config'''),
]
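# Example of the rewrites above (illustrative): the tfds line
#     features=tfds.features.FeaturesDict({"text": tfds.features.Text()})
# is converted to
#     features=datasets.Features({"text": datasets.Value('string')})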
def _lowerCamelCase( lowercase__ ) -> Optional[int]:
    '''Factory used by `datasets-cli convert`; wraps the parsed args in a ConvertCommand.'''
return ConvertCommand(args.tfds_path , args.datasets_directory )
class A ( A_ ):
@staticmethod
def _A (lowerCAmelCase ):
__lowercase= parser.add_parser(
'convert' , help='Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.' , )
train_parser.add_argument(
'--tfds_path' , type=lowerCAmelCase , required=lowerCAmelCase , help='Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.' , )
train_parser.add_argument(
'--datasets_directory' , type=lowerCAmelCase , required=lowerCAmelCase , help='Path to the HuggingFace Datasets folder.' )
train_parser.set_defaults(func=lowerCAmelCase )
def __init__(self , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ):
__lowercase= get_logger('datasets-cli/converting' )
__lowercase= tfds_path
__lowercase= datasets_directory
def _A (self ):
if os.path.isdir(self._tfds_path ):
__lowercase= os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
__lowercase= os.path.dirname(self._tfds_path )
else:
raise ValueError('--tfds_path is neither a directory nor a file. Please check path.' )
__lowercase= os.path.abspath(self._datasets_directory )
self._logger.info(f'Converting datasets from {abs_tfds_path} to {abs_datasets_path}' )
__lowercase= []
__lowercase= []
__lowercase= {}
if os.path.isdir(self._tfds_path ):
__lowercase= os.listdir(lowerCAmelCase )
else:
__lowercase= [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(f'Looking at file {f_name}' )
__lowercase= os.path.join(lowerCAmelCase , lowerCAmelCase )
__lowercase= os.path.join(lowerCAmelCase , lowerCAmelCase )
if not os.path.isfile(lowerCAmelCase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info('Skipping file' )
continue
with open(lowerCAmelCase , encoding='utf-8' ) as f:
__lowercase= f.readlines()
__lowercase= []
__lowercase= False
__lowercase= False
__lowercase= []
for line in lines:
__lowercase= line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
__lowercase= 'import datasets\n'
elif "import tensorflow" in out_line:
# order is important here
__lowercase= ''
continue
elif "from absl import logging" in out_line:
__lowercase= 'from datasets import logging\n'
elif "getLogger" in out_line:
__lowercase= out_line.replace('getLogger' , 'get_logger' )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
__lowercase= True
__lowercase= list(filter(lambda lowerCAmelCase : e in out_line , lowerCAmelCase ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(lowerCAmelCase ) + '\n' )
out_lines.append(lowerCAmelCase )
out_lines.append(lowerCAmelCase )
continue
else:
for pattern, replacement in TO_CONVERT:
__lowercase= re.sub(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
__lowercase= re.match(r'from\stensorflow_datasets.*import\s([^\.\r\n]+)' , lowerCAmelCase )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(',' ) )
__lowercase= 'from . import ' + match.group(1 )
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(f'Error converting {out_line.strip()}' )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
__lowercase= True
out_lines.append(lowerCAmelCase )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
__lowercase= f_name.replace('.py' , '' )
__lowercase= os.path.join(lowerCAmelCase , lowerCAmelCase )
__lowercase= os.path.join(lowerCAmelCase , lowerCAmelCase )
os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase )
self._logger.info(f'Adding directory {output_dir}' )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(lowerCAmelCase )
if needs_manual_update:
with_manual_update.append(lowerCAmelCase )
with open(lowerCAmelCase , 'w' , encoding='utf-8' ) as f:
f.writelines(lowerCAmelCase )
self._logger.info(f'Converted in {output_file}' )
for utils_file in utils_files:
try:
__lowercase= os.path.basename(lowerCAmelCase )
__lowercase= imports_to_builder_map[f_name.replace('.py' , '' )]
                self._logger.info(f'Moving {utils_file} to {dest_folder}' )
shutil.copy(lowerCAmelCase , lowerCAmelCase )
except KeyError:
self._logger.error(f'Cannot find destination folder for {utils_file}. Please copy manually.' )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
f'You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.' )
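# A minimal sketch of the (pattern, replacement) conversion performed in the
# loop above. The two rules below are illustrative stand-ins, not the CLI's
# actual TO_CONVERT table, which is defined elsewhere in the module.
def _convert_line_demo(line):
    example_rules = [
        (r"tfds\.features", "datasets.features"),
        (r"tf\.io\.gfile\.GFile", "open"),
    ]
    # Apply every regex rule in order, exactly like the inner for-loop above.
    for pattern, replacement in example_rules:
        line = re.sub(pattern, replacement, line)
    return line

assert _convert_line_demo("with tf.io.gfile.GFile(path) as f:") == "with open(path) as f:"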
| 304
| 0
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowerCAmelCase = 1_6
lowerCAmelCase = 3_2
def _lowerCamelCase( lowercase__ , lowercase__ = 1_6 ) -> str:
'''simple docstring'''
__lowercase= AutoTokenizer.from_pretrained('bert-base-cased' )
__lowercase= load_dataset('glue' , 'mrpc' )
def tokenize_function(lowercase__ ):
# max_length=None => use the model max length (it's actually the default)
__lowercase= tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=lowercase__ , max_length=lowercase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
__lowercase= datasets.map(
lowercase__ , batched=lowercase__ , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__lowercase= tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(lowercase__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
__lowercase= 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want to pad to round multiples of 8/16
if accelerator.mixed_precision == "fp8":
__lowercase= 1_6
elif accelerator.mixed_precision != "no":
__lowercase= 8
else:
__lowercase= None
return tokenizer.pad(
lowercase__ , padding='longest' , max_length=lowercase__ , pad_to_multiple_of=lowercase__ , return_tensors='pt' , )
# Instantiate dataloaders.
__lowercase= DataLoader(
tokenized_datasets['train'] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
__lowercase= DataLoader(
tokenized_datasets['validation'] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
lowerCAmelCase = mocked_dataloaders # noqa: F811
def _lowerCamelCase( lowercase__ , lowercase__ ) -> Optional[Any]:
'''simple docstring'''
if os.environ.get('TESTING_MOCKED_DATALOADERS' , lowercase__ ) == "1":
__lowercase= 2
# New Code #
__lowercase= int(args.gradient_accumulation_steps )
__lowercase= int(args.local_sgd_steps )
# Initialize accelerator
__lowercase= Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=lowercase__ )
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
raise NotImplementedError('LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)' )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__lowercase= config["""lr"""]
__lowercase= int(config['num_epochs'] )
__lowercase= int(config['seed'] )
__lowercase= int(config['batch_size'] )
__lowercase= evaluate.load('glue' , 'mrpc' )
set_seed(lowercase__ )
__lowercase= get_dataloaders(lowercase__ , lowercase__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__lowercase= AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=lowercase__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
__lowercase= model.to(accelerator.device )
# Instantiate optimizer
__lowercase= AdamW(params=model.parameters() , lr=lowercase__ )
# Instantiate scheduler
__lowercase= get_linear_schedule_with_warmup(
optimizer=lowercase__ , num_warmup_steps=1_0_0 , num_training_steps=(len(lowercase__ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__lowercase= accelerator.prepare(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# Now we train the model
for epoch in range(lowercase__ ):
model.train()
with LocalSGD(
accelerator=lowercase__ , model=lowercase__ , local_sgd_steps=lowercase__ , enabled=local_sgd_steps is not None ) as local_sgd:
for step, batch in enumerate(lowercase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(lowercase__ ):
__lowercase= model(**lowercase__ )
__lowercase= output.loss
accelerator.backward(lowercase__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# LocalSGD-specific line
local_sgd.step()
model.eval()
for step, batch in enumerate(lowercase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__lowercase= model(**lowercase__ )
__lowercase= outputs.logits.argmax(dim=-1 )
__lowercase= accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=lowercase__ , references=lowercase__ , )
__lowercase= metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'epoch {epoch}:' , lowercase__ )
def _lowerCamelCase( ) -> Tuple:
'''simple docstring'''
__lowercase= argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
        '--mixed_precision' , type=lowercase__ , default=lowercase__ , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose '
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '
        'and an Nvidia Ampere GPU.' , )
# New Code #
parser.add_argument(
        '--gradient_accumulation_steps' , type=lowercase__ , default=1 , help='The number of minibatches to be run before gradients are accumulated.' , )
parser.add_argument(
'--local_sgd_steps' , type=lowercase__ , default=8 , help='Number of local SGD steps or None to disable local SGD' )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
__lowercase= parser.parse_args()
__lowercase= {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 4_2, """batch_size""": 1_6}
training_function(lowercase__ , lowercase__ )
if __name__ == "__main__":
main()
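# Conceptual sketch of what the `LocalSGD` context manager amounts to (an
# illustration of the idea, not accelerate's actual implementation): every
# `local_sgd_steps` optimizer steps, the model parameters are averaged across
# all workers.
def average_parameters_across_workers(model, world_size):
    # Assumes torch.distributed has been initialized; all_reduce sums each
    # parameter over the ranks, so dividing by world_size yields the mean.
    for param in model.parameters():
        torch.distributed.all_reduce(param.data, op=torch.distributed.ReduceOp.SUM)
        param.data /= world_size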
| 351
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
lowerCAmelCase = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class A ( A_ ):
UpperCamelCase_ : Optional[int] ='''albert'''
def __init__(self , lowerCAmelCase=3_0_0_0_0 , lowerCAmelCase=1_2_8 , lowerCAmelCase=4_0_9_6 , lowerCAmelCase=1_2 , lowerCAmelCase=1 , lowerCAmelCase=6_4 , lowerCAmelCase=1_6_3_8_4 , lowerCAmelCase=1 , lowerCAmelCase="gelu_new" , lowerCAmelCase=0 , lowerCAmelCase=0 , lowerCAmelCase=5_1_2 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=1E-12 , lowerCAmelCase=0.1 , lowerCAmelCase="absolute" , lowerCAmelCase=0 , lowerCAmelCase=2 , lowerCAmelCase=3 , **lowerCAmelCase , ):
super().__init__(pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , **lowerCAmelCase )
__lowercase= vocab_size
__lowercase= embedding_size
__lowercase= hidden_size
__lowercase= num_hidden_layers
__lowercase= num_hidden_groups
__lowercase= num_attention_heads
__lowercase= inner_group_num
__lowercase= hidden_act
__lowercase= intermediate_size
__lowercase= hidden_dropout_prob
__lowercase= attention_probs_dropout_prob
__lowercase= max_position_embeddings
__lowercase= type_vocab_size
__lowercase= initializer_range
__lowercase= layer_norm_eps
__lowercase= classifier_dropout_prob
__lowercase= position_embedding_type
class A ( A_ ):
@property
def _A (self ):
if self.task == "multiple-choice":
__lowercase= {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__lowercase= {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
| 304
| 0
|
class A :
def __init__(self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= name
__lowercase= value
__lowercase= weight
def __repr__(self ):
return f'{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'
def _A (self ):
return self.value
def _A (self ):
return self.name
def _A (self ):
return self.weight
def _A (self ):
return self.value / self.weight
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ ) -> Optional[Any]:
'''simple docstring'''
__lowercase= []
for i in range(len(__UpperCAmelCase ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ ) -> Optional[int]:
'''simple docstring'''
__lowercase= sorted(__UpperCAmelCase , key=__UpperCAmelCase , reverse=__UpperCAmelCase )
__lowercase= []
__lowercase= 0.0, 0.0
for i in range(len(__UpperCAmelCase ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def _lowerCamelCase( ) -> str:
'''simple docstring'''
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
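    # Self-contained worked example mirroring the greedy routine above (data is
    # made up; the real helper sorts by a caller-supplied key, here plain value):
    # sort descending, then take each item while the weight budget allows.
    def greedy_demo(items, max_weight):
        items = sorted(items, key=lambda it: it[1], reverse=True)
        taken, total_value, total_weight = [], 0, 0
        for name, value, weight in items:
            if total_weight + weight <= max_weight:
                taken.append(name)
                total_weight += weight
                total_value += value
        return taken, total_value

    # Pizza (100, 10) fits first, then Burger (80, 40): value 180 at weight 50.
    assert greedy_demo(
        [("Burger", 80, 40), ("Pizza", 100, 10), ("Cola", 60, 20), ("Rice", 70, 70)],
        max_weight=60,
    ) == (["Pizza", "Burger"], 180)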
| 352
|
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def _lowerCamelCase( lowercase__ , lowercase__ ) -> Optional[int]:
'''simple docstring'''
__lowercase= 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
__lowercase= Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw ).convert('RGB' )
__lowercase= transforms.Compose(
[
transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
transforms.ToTensor(),
transforms.Normalize((0.4814_5466, 0.457_8275, 0.4082_1073) , (0.2686_2954, 0.2613_0258, 0.2757_7711) ),
] )
__lowercase= transform(lowercase__ ).unsqueeze(0 ).to(lowercase__ )
return image
def _lowerCamelCase( lowercase__ ) -> Dict:
'''simple docstring'''
if "visual_encoder" in key:
__lowercase= re.sub('visual_encoder*' , 'vision_model.encoder' , lowercase__ )
if "blocks" in key:
__lowercase= re.sub(R'blocks' , 'layers' , lowercase__ )
if "attn" in key:
__lowercase= re.sub(R'attn' , 'self_attn' , lowercase__ )
if "norm1" in key:
__lowercase= re.sub(R'norm1' , 'layer_norm1' , lowercase__ )
if "norm2" in key:
__lowercase= re.sub(R'norm2' , 'layer_norm2' , lowercase__ )
if "encoder.norm" in key:
__lowercase= re.sub(R'encoder.norm' , 'post_layernorm' , lowercase__ )
if "encoder.patch_embed.proj" in key:
__lowercase= re.sub(R'encoder.patch_embed.proj' , 'embeddings.patch_embedding' , lowercase__ )
if "encoder.pos_embed" in key:
__lowercase= re.sub(R'encoder.pos_embed' , 'embeddings.position_embedding' , lowercase__ )
if "encoder.cls_token" in key:
__lowercase= re.sub(R'encoder.cls_token' , 'embeddings.class_embedding' , lowercase__ )
if "self_attn" in key:
__lowercase= re.sub(R'self_attn.proj' , 'self_attn.projection' , lowercase__ )
return key
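# Illustration of the chained renames above (the key is made up for the example):
# 'visual_encoder.blocks.0.attn.qkv.weight'
#   -> 'vision_model.encoder.layers.0.self_attn.qkv.weight'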
@torch.no_grad()
def _lowerCamelCase( lowercase__ , lowercase__=None ) -> int:
'''simple docstring'''
if config_path is not None:
__lowercase= BlipConfig.from_pretrained(lowercase__ )
else:
__lowercase= BlipConfig(projection_dim=5_1_2 , text_config={} , vision_config={} )
__lowercase= BlipForConditionalGeneration(lowercase__ ).eval()
__lowercase= 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'
__lowercase= blip_decoder(pretrained=lowercase__ , image_size=3_8_4 , vit='base' )
__lowercase= pt_model.eval()
__lowercase= pt_model.state_dict()
for key in modified_state_dict.copy():
__lowercase= modified_state_dict.pop(lowercase__ )
__lowercase= rename_key(lowercase__ )
__lowercase= value
hf_model.load_state_dict(lowercase__ )
__lowercase= 3_8_4
__lowercase= load_demo_image(image_size=lowercase__ , device='cpu' )
__lowercase= BertTokenizer.from_pretrained('bert-base-uncased' )
__lowercase= tokenizer(['a picture of'] ).input_ids
__lowercase= hf_model.generate(lowercase__ , lowercase__ )
assert out[0].tolist() == [3_0_5_2_2, 1_0_3_7, 3_8_6_1, 1_9_9_7, 1_0_3_7, 2_4_5_0, 3_5_6_4, 2_0_0_6, 1_9_9_6, 3_5_0_9, 2_0_0_7, 2_0_1_4, 3_8_9_9, 1_0_2]
__lowercase= hf_model.generate(lowercase__ )
assert out[0].tolist() == [3_0_5_2_2, 1_0_3_7, 2_4_5_0, 3_5_6_4, 2_0_0_6, 1_9_9_6, 3_5_0_9, 2_0_0_7, 2_0_1_4, 3_8_9_9, 1_0_2]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(lowercase__ )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
__lowercase= (
'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'
)
__lowercase= blip_vqa(pretrained=lowercase__ , image_size=lowercase__ , vit='base' )
vqa_model.eval()
__lowercase= vqa_model.state_dict()
for key in modified_state_dict.copy():
__lowercase= modified_state_dict.pop(lowercase__ )
__lowercase= rename_key(lowercase__ )
__lowercase= value
__lowercase= BlipForQuestionAnswering(lowercase__ )
hf_vqa_model.load_state_dict(lowercase__ )
__lowercase= ['How many dogs are in this image?']
__lowercase= tokenizer(lowercase__ , return_tensors='pt' ).input_ids
__lowercase= hf_vqa_model.generate(lowercase__ , lowercase__ )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '_vqa' )
__lowercase= 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'
__lowercase= blip_itm(pretrained=lowercase__ , image_size=lowercase__ , vit='base' )
itm_model.eval()
__lowercase= itm_model.state_dict()
for key in modified_state_dict.copy():
__lowercase= modified_state_dict.pop(lowercase__ )
__lowercase= rename_key(lowercase__ )
__lowercase= value
__lowercase= BlipForImageTextRetrieval(lowercase__ )
__lowercase= ['A picture of a woman with a dog sitting in a beach']
__lowercase= tokenizer(
lowercase__ , return_tensors='pt' , padding='max_length' , truncation=lowercase__ , max_length=3_5 , ).input_ids
hf_itm_model.load_state_dict(lowercase__ )
hf_itm_model.eval()
__lowercase= hf_itm_model(lowercase__ , lowercase__ , use_itm_head=lowercase__ )
__lowercase= hf_itm_model(lowercase__ , lowercase__ , use_itm_head=lowercase__ )
assert out[0].item() == 0.2110_6874_9427_7954
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_5698_8453_8650_5127
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + '_itm' )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
lowerCAmelCase = parser.parse_args()
    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
| 304
| 0
|
from ...processing_utils import ProcessorMixin
class A ( A__ ):
UpperCamelCase_ : List[Any] ='''WhisperFeatureExtractor'''
UpperCamelCase_ : Optional[int] ='''WhisperTokenizer'''
def __init__(self , lowerCAmelCase , lowerCAmelCase ):
super().__init__(__snake_case , __snake_case )
__lowercase= self.feature_extractor
__lowercase= False
def _A (self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=True ):
return self.tokenizer.get_decoder_prompt_ids(task=__snake_case , language=__snake_case , no_timestamps=__snake_case )
def __call__(self , *lowerCAmelCase , **lowerCAmelCase ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__snake_case , **__snake_case )
__lowercase= kwargs.pop('audio' , __snake_case )
__lowercase= kwargs.pop('sampling_rate' , __snake_case )
__lowercase= kwargs.pop('text' , __snake_case )
if len(__snake_case ) > 0:
__lowercase= args[0]
__lowercase= args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if audio is not None:
__lowercase= self.feature_extractor(__snake_case , *__snake_case , sampling_rate=__snake_case , **__snake_case )
if text is not None:
__lowercase= self.tokenizer(__snake_case , **__snake_case )
if text is None:
return inputs
elif audio is None:
return encodings
else:
__lowercase= encodings['input_ids']
return inputs
def _A (self , *lowerCAmelCase , **lowerCAmelCase ):
return self.tokenizer.batch_decode(*__snake_case , **__snake_case )
def _A (self , *lowerCAmelCase , **lowerCAmelCase ):
return self.tokenizer.decode(*__snake_case , **__snake_case )
def _A (self , lowerCAmelCase , lowerCAmelCase="np" ):
return self.tokenizer.get_prompt_ids(__snake_case , return_tensors=__snake_case )
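# Usage sketch (hedged: this assumes the class is exposed as `WhisperProcessor`
# and that a feature extractor/tokenizer pair exists under the hub id below):
#   processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
#   inputs = processor(audio=waveform, sampling_rate=16_000, return_tensors="pt")
#   labels = processor(text="hello world").input_ids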
| 353
|
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
lowerCAmelCase = (3, 9, -1_1, 0, 7, 5, 1, -1)
lowerCAmelCase = (4, 6, 2, 0, 8, 1_0, 3, -2)
@dataclass
class A :
UpperCamelCase_ : int
UpperCamelCase_ : Node | None
class A :
def __init__(self , lowerCAmelCase ):
__lowercase= None
        for i in sorted(lowerCAmelCase , reverse=True ):
            __lowercase= Node(i , self.head )
def __iter__(self ):
__lowercase= self.head
while node:
yield node.data
__lowercase= node.next_node
def __len__(self ):
return sum(1 for _ in self )
def __str__(self ):
return " -> ".join([str(lowerCAmelCase ) for node in self] )
def _lowerCamelCase( lowercase__ , lowercase__ ) -> SortedLinkedList:
'''simple docstring'''
return SortedLinkedList(list(lowercase__ ) + list(lowercase__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 304
| 0
|
from bisect import bisect
from itertools import accumulate
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> str:
'''simple docstring'''
    __lowercase= sorted(zip(lowerCamelCase__ , lowerCamelCase__ ) , key=lambda x : x[0] / x[1] , reverse=True )
__lowercase= [i[0] for i in r], [i[1] for i in r]
__lowercase= list(accumulate(lowerCamelCase__ ) )
__lowercase= bisect(lowerCamelCase__ , lowerCamelCase__ )
return (
0
if k == 0
else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
if k != n
else sum(vl[:k] )
)
if __name__ == "__main__":
import doctest
doctest.testmod()
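    # Worked example of the prefix-sum + bisect trick used above (numbers made
    # up, items pre-sorted by value density 6 > 5 > 4): `accumulate` yields the
    # cumulative weights and `bisect` counts how many whole items fit under w,
    # after which a fraction of the next item tops up the answer.
    wt, vl, w = [10, 20, 30], [60, 100, 120], 50
    acc = list(accumulate(wt))  # [10, 30, 60]
    k = bisect(acc, w)          # 2 whole items fit (10 + 20 <= 50)
    assert sum(vl[:k]) + (w - acc[k - 1]) * vl[k] / wt[k] == 240.0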
| 354
|
from __future__ import annotations
from collections.abc import Callable
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__ = 1_0_0 , ) -> float:
'''simple docstring'''
__lowercase= x_start
__lowercase= fnc(lowercase__ )
__lowercase= 0.0
for _ in range(lowercase__ ):
# Approximates small segments of curve as linear and solve
# for trapezoidal area
__lowercase= (x_end - x_start) / steps + xa
__lowercase= fnc(lowercase__ )
area += abs(fxa + fxa ) * (xa - xa) / 2
# Increment step
__lowercase= xa
__lowercase= fxa
return area
if __name__ == "__main__":
def _lowerCamelCase( lowercase__ ) -> Dict:
'''simple docstring'''
return x**3 + x**2
print('''f(x) = x^3 + x^2''')
print('''The area between the curve, x = -5, x = 5 and the x axis is:''')
lowerCAmelCase = 1_0
while i <= 1_0_0_0_0_0:
print(F'with {i} steps: {trapezoidal_area(f, -5, 5, i)}')
i *= 1_0
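    # Self-contained convergence check of the trapezoidal rule demonstrated
    # above: the exact integral of x^3 + x^2 over [-5, 5] is 2 * 5**3 / 3 (the
    # odd x^3 term cancels on a symmetric interval).
    def _trapezoid_demo(fnc, x_start, x_end, steps):
        h = (x_end - x_start) / steps
        xs = [x_start + k * h for k in range(steps + 1)]
        return sum((fnc(xs[k]) + fnc(xs[k + 1])) * h / 2 for k in range(steps))

    exact = 2 * 5**3 / 3
    assert abs(_trapezoid_demo(lambda x: x**3 + x**2, -5.0, 5.0, 10_000) - exact) < 1e-3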
| 304
| 0
|
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class A ( lowercase__ , unittest.TestCase ):
UpperCamelCase_ : List[Any] =CTRLTokenizer
UpperCamelCase_ : Optional[Any] =False
UpperCamelCase_ : Union[str, Any] =False
def _A (self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__lowercase= ['adapt', 're@@', 'a@@', 'apt', 'c@@', 't', '<unk>']
__lowercase= dict(zip(_a , range(len(_a ) ) ) )
__lowercase= ['#version: 0.2', 'a p', 'ap t</w>', 'r e', 'a d', 'ad apt</w>', '']
__lowercase= {'unk_token': '<unk>'}
__lowercase= os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__lowercase= os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_a ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(_a ) )
def _A (self , **lowerCAmelCase ):
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **_a )
def _A (self , lowerCAmelCase ):
__lowercase= 'adapt react readapt apt'
__lowercase= 'adapt react readapt apt'
return input_text, output_text
def _A (self ):
__lowercase= CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
__lowercase= 'adapt react readapt apt'
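        # In this toy BPE vocabulary '@@' marks a subword continued by the next
        # token, which is why 'react' is expected to split into 're@@ a@@ c@@ t'.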
__lowercase= 'adapt re@@ a@@ c@@ t re@@ adapt apt'.split()
__lowercase= tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
__lowercase= tokens + [tokenizer.unk_token]
__lowercase= [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , _a )
| 355
|
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class A :
def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=2 , lowerCAmelCase=9_9 , lowerCAmelCase=0 , lowerCAmelCase=3_2 , lowerCAmelCase=5 , lowerCAmelCase=4 , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_1_2 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=2 , lowerCAmelCase=4 , lowerCAmelCase="last" , lowerCAmelCase=True , lowerCAmelCase=None , lowerCAmelCase=0 , ):
__lowercase= parent
__lowercase= batch_size
__lowercase= seq_length
__lowercase= is_training
__lowercase= use_input_lengths
__lowercase= use_token_type_ids
__lowercase= use_labels
__lowercase= gelu_activation
__lowercase= sinusoidal_embeddings
__lowercase= causal
__lowercase= asm
__lowercase= n_langs
__lowercase= vocab_size
__lowercase= n_special
__lowercase= hidden_size
__lowercase= num_hidden_layers
__lowercase= num_attention_heads
__lowercase= hidden_dropout_prob
__lowercase= attention_probs_dropout_prob
__lowercase= max_position_embeddings
__lowercase= type_sequence_label_size
__lowercase= initializer_range
__lowercase= num_labels
__lowercase= num_choices
__lowercase= summary_type
__lowercase= use_proj
__lowercase= scope
__lowercase= bos_token_id
def _A (self ):
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase= random_attention_mask([self.batch_size, self.seq_length] )
__lowercase= None
if self.use_input_lengths:
__lowercase= (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__lowercase= None
if self.use_token_type_ids:
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__lowercase= None
__lowercase= None
__lowercase= None
if self.use_labels:
__lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase= ids_tensor([self.batch_size] , 2 ).float()
__lowercase= ids_tensor([self.batch_size] , self.num_choices )
__lowercase= self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _A (self ):
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , lengths=lowerCAmelCase , langs=lowerCAmelCase )
__lowercase= model(lowerCAmelCase , langs=lowerCAmelCase )
__lowercase= model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMWithLMHeadModel(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMForQuestionAnsweringSimple(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase )
__lowercase= model(lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase )
__lowercase= outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMForQuestionAnswering(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase )
__lowercase= model(
lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase , cls_index=lowerCAmelCase , is_impossible=lowerCAmelCase , p_mask=lowerCAmelCase , )
__lowercase= model(
lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase , cls_index=lowerCAmelCase , is_impossible=lowerCAmelCase , )
((__lowercase), )= result_with_labels.to_tuple()
__lowercase= model(lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase )
((__lowercase), )= result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMForSequenceClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase )
__lowercase= model(lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= self.num_labels
__lowercase= XLMForTokenClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= self.num_choices
__lowercase= XLMForMultipleChoice(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase= token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase= input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase= model(
lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _A (self ):
__lowercase= self.prepare_config_and_inputs()
(
(
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
),
)= config_and_inputs
__lowercase= {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class A ( A_ , A_ , A_ , unittest.TestCase ):
UpperCamelCase_ : int =(
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
UpperCamelCase_ : Dict =(
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
UpperCamelCase_ : str =(
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False ):
__lowercase= super()._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
__lowercase= torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase )
__lowercase= torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase )
return inputs_dict
def _A (self ):
__lowercase= XLMModelTester(self )
__lowercase= ConfigTester(self , config_class=lowerCAmelCase , emb_dim=3_7 )
def _A (self ):
self.config_tester.run_common_tests()
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False , lowerCAmelCase=1 ):
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
self.assertListEqual(
[isinstance(lowerCAmelCase , lowerCAmelCase ) for iter_attentions in attentions] , [True] * len(lowerCAmelCase ) )
self.assertEqual(len(lowerCAmelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(lowerCAmelCase ):
# adds PAD dummy token
__lowercase= min_length + idx + 1
__lowercase= min_length + idx + 1
__lowercase= (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(lowerCAmelCase ) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False , lowerCAmelCase=1 ):
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
self.assertListEqual(
[isinstance(lowerCAmelCase , lowerCAmelCase ) for iter_hidden_states in hidden_states] , [True] * len(lowerCAmelCase ) , )
self.assertEqual(len(lowerCAmelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(lowerCAmelCase ):
# adds PAD dummy token
__lowercase= min_length + idx + 1
__lowercase= (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(lowerCAmelCase ) , )
pass
@slow
def _A (self ):
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase= XLMModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
@require_torch
class A ( unittest.TestCase ):
@slow
def _A (self ):
__lowercase= XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' )
model.to(lowerCAmelCase )
__lowercase= torch.tensor([[1_4, 4_4_7]] , dtype=torch.long , device=lowerCAmelCase ) # the president
__lowercase= [
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
__lowercase= model.generate(lowerCAmelCase , do_sample=lowerCAmelCase )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , lowerCAmelCase )
| 304
| 0
|
def _lowerCamelCase( lowercase__ , lowercase__ ) -> bool:
'''simple docstring'''
__lowercase= len(__snake_case )
__lowercase= len(__snake_case )
__lowercase= [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
__lowercase= True
for i in range(__snake_case ):
for j in range(m + 1 ):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
__lowercase= True
if a[i].islower():
__lowercase= True
return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
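    # Worked example of the DP above, restated standalone: uppercase letters of
    # `a` must match `b` in order, lowercase letters may be uppercased or dropped.
    def _matches_abbreviation(a: str, b: str) -> bool:
        n, m = len(a), len(b)
        dp = [[False] * (m + 1) for _ in range(n + 1)]
        dp[0][0] = True
        for i in range(n):
            for j in range(m + 1):
                if dp[i][j]:
                    if j < m and a[i].upper() == b[j]:
                        dp[i + 1][j + 1] = True
                    if a[i].islower():
                        dp[i + 1][j] = True
        return dp[n][m]

    assert _matches_abbreviation("daBcd", "ABC") is True   # a->A, keep B, c->C, drop the d's
    assert _matches_abbreviation("dBcd", "ABC") is False   # nothing can supply the 'A'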
| 356
|
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
lowerCAmelCase = {'''UserAgent''': UserAgent().random}
def _lowerCamelCase( lowercase__ ) -> dict:
'''simple docstring'''
__lowercase= script.contents[0]
__lowercase= json.loads(data[data.find('{"config"' ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class A :
def __init__(self , lowerCAmelCase ):
__lowercase= f'https://www.instagram.com/{username}/'
__lowercase= self.get_json()
def _A (self ):
__lowercase= requests.get(self.url , headers=lowerCAmelCase ).text
__lowercase= BeautifulSoup(lowerCAmelCase , 'html.parser' ).find_all('script' )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__(self ):
return f'{self.__class__.__name__}(\'{self.username}\')'
def __str__(self ):
return f'{self.fullname} ({self.username}) is {self.biography}'
@property
def _A (self ):
return self.user_data["username"]
@property
def _A (self ):
return self.user_data["full_name"]
@property
def _A (self ):
return self.user_data["biography"]
@property
def _A (self ):
return self.user_data["business_email"]
@property
def _A (self ):
return self.user_data["external_url"]
@property
def _A (self ):
return self.user_data["edge_followed_by"]["count"]
@property
def _A (self ):
return self.user_data["edge_follow"]["count"]
@property
def _A (self ):
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def _A (self ):
return self.user_data["profile_pic_url_hd"]
@property
def _A (self ):
return self.user_data["is_verified"]
@property
def _A (self ):
return self.user_data["is_private"]
def _lowerCamelCase( lowercase__ = "github" ) -> None:
'''simple docstring'''
import os
if os.environ.get('CI' ):
return # test failing on GitHub Actions
__lowercase= InstagramUser(lowercase__ )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , lowercase__ )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 1_5_0
assert instagram_user.number_of_followers > 1_2_0_0_0_0
assert instagram_user.number_of_followings > 1_5
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('https://instagram.' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase = InstagramUser('''github''')
print(instagram_user)
print(F'{instagram_user.number_of_posts = }')
print(F'{instagram_user.number_of_followers = }')
print(F'{instagram_user.number_of_followings = }')
print(F'{instagram_user.email = }')
print(F'{instagram_user.website = }')
print(F'{instagram_user.profile_picture_url = }')
print(F'{instagram_user.is_verified = }')
print(F'{instagram_user.is_private = }')
| 304
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}
class A ( A_ ):
UpperCamelCase_ : Optional[int] ='''timesformer'''
def __init__(self , lowerCAmelCase=2_2_4 , lowerCAmelCase=1_6 , lowerCAmelCase=3 , lowerCAmelCase=8 , lowerCAmelCase=7_6_8 , lowerCAmelCase=1_2 , lowerCAmelCase=1_2 , lowerCAmelCase=3_0_7_2 , lowerCAmelCase="gelu" , lowerCAmelCase=0.0 , lowerCAmelCase=0.0 , lowerCAmelCase=0.02 , lowerCAmelCase=1E-6 , lowerCAmelCase=True , lowerCAmelCase="divided_space_time" , lowerCAmelCase=0 , **lowerCAmelCase , ):
super().__init__(**_snake_case )
__lowercase= image_size
__lowercase= patch_size
__lowercase= num_channels
__lowercase= num_frames
__lowercase= hidden_size
__lowercase= num_hidden_layers
__lowercase= num_attention_heads
__lowercase= intermediate_size
__lowercase= hidden_act
__lowercase= hidden_dropout_prob
__lowercase= attention_probs_dropout_prob
__lowercase= initializer_range
__lowercase= layer_norm_eps
__lowercase= qkv_bias
__lowercase= attention_type
__lowercase= drop_path_rate
| 357
|
from typing import Any
import numpy as np
def _lowerCamelCase( lowercase__ ) -> bool:
'''simple docstring'''
return np.array_equal(lowercase__ , matrix.conjugate().T )
def _lowerCamelCase( lowercase__ , lowercase__ ) -> Any:
'''simple docstring'''
__lowercase= v.conjugate().T
__lowercase= v_star.dot(lowercase__ )
assert isinstance(lowercase__ , np.ndarray )
return (v_star_dot.dot(lowercase__ )) / (v_star.dot(lowercase__ ))
def _lowerCamelCase( ) -> None:
'''simple docstring'''
__lowercase= np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
__lowercase= np.array([[1], [2], [3]] )
assert is_hermitian(lowercase__ ), F'{a} is not hermitian.'
print(rayleigh_quotient(lowercase__ , lowercase__ ) )
__lowercase= np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
assert is_hermitian(lowercase__ ), F'{a} is not hermitian.'
assert rayleigh_quotient(lowercase__ , lowercase__ ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
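    # Extra illustration of the key property checked above: for a Hermitian
    # matrix, the Rayleigh quotient always lies between the smallest and
    # largest eigenvalues (the matrix and vector below are arbitrary).
    a = np.array([[2.0, 1.0], [1.0, 2.0]])  # real symmetric, hence Hermitian
    v = np.array([[1.0], [3.0]])
    quotient = (v.conjugate().T @ a @ v) / (v.conjugate().T @ v)  # -> 2.6
    eigenvalues = np.linalg.eigvalsh(a)  # ascending: [1.0, 3.0]
    assert eigenvalues[0] - 1e-9 <= quotient.item() <= eigenvalues[-1] + 1e-9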
| 304
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase = {
'configuration_blenderbot_small': [
'BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlenderbotSmallConfig',
'BlenderbotSmallOnnxConfig',
],
'tokenization_blenderbot_small': ['BlenderbotSmallTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = ['BlenderbotSmallTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
'BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlenderbotSmallForCausalLM',
'BlenderbotSmallForConditionalGeneration',
'BlenderbotSmallModel',
'BlenderbotSmallPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
'TFBlenderbotSmallForConditionalGeneration',
'TFBlenderbotSmallModel',
'TFBlenderbotSmallPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
'FlaxBlenderbotSmallForConditionalGeneration',
'FlaxBlenderbotSmallModel',
'FlaxBlenderbotSmallPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 358
|
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
lowerCAmelCase = logging.get_logger(__name__)
class A ( A_ ):
UpperCamelCase_ : Dict =['''audio_values''', '''audio_mask''']
def __init__(self , lowerCAmelCase=2_0_4_8 , lowerCAmelCase=1 , lowerCAmelCase=[1_6, 1_6] , lowerCAmelCase=1_2_8 , lowerCAmelCase=4_4_1_0_0 , lowerCAmelCase=8_6 , lowerCAmelCase=2_0_4_8 , lowerCAmelCase=0.0 , **lowerCAmelCase , ):
super().__init__(
feature_size=lowerCAmelCase , sampling_rate=lowerCAmelCase , padding_value=lowerCAmelCase , **lowerCAmelCase , )
__lowercase= spectrogram_length
__lowercase= num_channels
__lowercase= patch_size
__lowercase= feature_size // self.patch_size[1]
__lowercase= n_fft
__lowercase= sampling_rate // hop_length_to_sampling_rate
__lowercase= sampling_rate
__lowercase= padding_value
__lowercase= mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=lowerCAmelCase , min_frequency=0.0 , max_frequency=2_20_50.0 , sampling_rate=lowerCAmelCase , norm='slaney' , mel_scale='slaney' , ).T
def _A (self , lowerCAmelCase ):
__lowercase= spectrogram(
lowerCAmelCase , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='dB' , db_range=80.0 , )
__lowercase= log_spec[:, :-1]
__lowercase= log_spec - 20.0
__lowercase= np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
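        # Net effect of the three steps above: drop the last frame, shift by
        # -20 dB, and rescale so the returned log-mel values lie in [-1.0, 1.0].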
return log_spec
def __call__(self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = False , lowerCAmelCase = False , **lowerCAmelCase , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
'This feature extractor is set to support sampling rate'
f' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'
f' with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
__lowercase= isinstance(lowerCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
__lowercase= is_batched_numpy or (
isinstance(lowerCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__lowercase= [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(lowerCAmelCase , np.ndarray ):
__lowercase= np.asarray(lowerCAmelCase , dtype=np.floataa )
elif isinstance(lowerCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__lowercase= raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__lowercase= [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
__lowercase= [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , lowerCAmelCase ):
__lowercase= [np.asarray(lowerCAmelCase , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
__lowercase= max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
__lowercase= [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
__lowercase= np.array(lowerCAmelCase ).astype(np.floataa )
# convert into correct format for padding
__lowercase= max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
__lowercase= np.ones([len(lowerCAmelCase ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
__lowercase= padded_audio_features * self.padding_value
for i in range(len(lowerCAmelCase ) ):
__lowercase= audio_features[i]
__lowercase= feature
# return as BatchFeature
if return_attention_mask:
__lowercase= {'audio_values': padded_audio_features, 'audio_mask': audio_mask}
else:
__lowercase= {'audio_values': padded_audio_features}
__lowercase= BatchFeature(data=lowerCAmelCase , tensor_type=lowerCAmelCase )
return encoded_inputs
| 304
| 0
|
from __future__ import annotations
from statistics import mean
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ ) -> Tuple:
'''simple docstring'''
__lowercase= [0] * no_of_processes
__lowercase= [0] * no_of_processes
# Initialize remaining_time to waiting_time.
for i in range(lowercase__ ):
__lowercase= burst_time[i]
__lowercase= []
__lowercase= 0
__lowercase= 0
    # While processes remain uncompleted, every process whose arrival time has
    # passed and that still has remaining execution time is put into
    # ready_process; the shortest process in ready_process (target_process)
    # is executed.
while completed != no_of_processes:
__lowercase= []
__lowercase= -1
for i in range(lowercase__ ):
if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
ready_process.append(lowercase__ )
if len(lowercase__ ) > 0:
__lowercase= ready_process[0]
for i in ready_process:
if remaining_time[i] < remaining_time[target_process]:
__lowercase= i
total_time += burst_time[target_process]
completed += 1
__lowercase= 0
__lowercase= (
total_time - arrival_time[target_process] - burst_time[target_process]
)
else:
total_time += 1
return waiting_time
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ ) -> Any:
'''simple docstring'''
__lowercase= [0] * no_of_processes
for i in range(lowercase__ ):
__lowercase= burst_time[i] + waiting_time[i]
return turn_around_time
if __name__ == "__main__":
print('''[TEST CASE 01]''')
lowerCAmelCase = 4
lowerCAmelCase = [2, 5, 3, 7]
lowerCAmelCase = [0, 0, 0, 0]
lowerCAmelCase = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
lowerCAmelCase = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print('''PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time''')
for i, process_id in enumerate(list(range(1, 5))):
print(
F'{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t'
F'{waiting_time[i]}\t\t\t\t{turn_around_time[i]}'
)
print(F'\nAverage waiting time = {mean(waiting_time):.5f}')
print(F'Average turnaround time = {mean(turn_around_time):.5f}')
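    # Hand check for this test case: with all arrivals at t=0 and bursts
    # [2, 5, 3, 7], SJF runs P1, P3, P2, P4, giving waiting times [0, 5, 2, 10]
    # (mean 4.25) and turnaround times [2, 10, 5, 17] (mean 8.50).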
| 359
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    '''simple docstring'''
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)
def test_gather(state):
    '''simple docstring'''
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))
def test_gather_object(state):
    '''simple docstring'''
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f'{gathered_obj}, {len(gathered_obj)} != {state.num_processes}'
    assert gathered_obj == list(range(state.num_processes)), f'{gathered_obj} != {list(range(state.num_processes))}'
def test_broadcast(state):
    '''simple docstring'''
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))
def test_pad_across_processes(state):
    '''simple docstring'''
    # The main process works with one extra element so that padding is observable on the others
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]
def test_reduce_sum(state):
    '''simple docstring'''
    # This check is written for exactly two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, 'sum')
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f'{reduced_tensor} != {truth_tensor}'
def test_reduce_mean(state):
    '''simple docstring'''
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, 'mean')
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f'{reduced_tensor} != {truth_tensor}'
def _mp_fn(index):
    '''simple docstring'''
    # Entry point for TPU launches (the name `_mp_fn` is assumed from Accelerate's usual test scripts)
    main()
def main():
    '''simple docstring'''
    state = PartialState()
    state.print(f'State: {state}')
    state.print('testing gather')
    test_gather(state)
    state.print('testing gather_object')
    test_gather_object(state)
    state.print('testing broadcast')
    test_broadcast(state)
    state.print('testing pad_across_processes')
    test_pad_across_processes(state)
    state.print('testing reduce_sum')
    test_reduce_sum(state)
    state.print('testing reduce_mean')
    test_reduce_mean(state)
if __name__ == "__main__":
    main()
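# These checks only exercise the collective ops when several processes are running.
# A typical invocation (assumed example; the script file name here is hypothetical):
#   accelerate launch --num_processes 2 test_ops.py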
| 304
| 0
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_autoformer''': [
'''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AutoformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_autoformer'] = [
'''AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AutoformerForPrediction''',
'''AutoformerModel''',
'''AutoformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
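# A minimal sketch of the lazy-import pattern used above (an assumed simplification of
# `_LazyModule`, not the real implementation): attribute access imports the submodule on demand.
#
#     import importlib
#     import types
#
#     class LazyDemo(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             self._import_structure = import_structure
#
#         def __getattr__(self, attr):
#             for module, symbols in self._import_structure.items():
#                 if attr in symbols:
#                     submodule = importlib.import_module('.' + module, self.__name__)
#                     return getattr(submodule, attr)
#             raise AttributeError(attr)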
| 360
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor
class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    order = 1
    @register_to_config
    def __init__(self, num_train_timesteps=2000, snr=0.15, sigma_min=0.01, sigma_max=1348.0, sampling_eps=1e-5, correct_steps=1):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max
        # setable values
        self.timesteps = None
        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)
    def scale_model_input(self, sample, timestep=None):
        return sample
    def set_timesteps(self, num_inference_steps, sampling_eps=None, device=None):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)
    def set_sigmas(self, num_inference_steps, sigma_min=None, sigma_max=None, sampling_eps=None):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)
        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])
    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0, torch.zeros_like(t.to(timesteps.device)), self.discrete_sigmas[timesteps - 1].to(timesteps.device))
    def step_pred(self, model_output, timestep, sample, generator=None, return_dict=True):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")
        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device)  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()
        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)
        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5
        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype)
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g
        if not return_dict:
            return (prev_sample, prev_sample_mean)
        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)
    def step_correct(self, model_output, sample, generator=None, return_dict=True):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")
        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)
        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])
        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(self, original_samples, noise, timesteps):
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples
    def __len__(self):
        return self.config.num_train_timesteps
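# Hedged usage sketch for the scheduler above (`model`, `sample`, and `num_inference_steps`
# are placeholders, not taken from this file): each timestep runs the corrector step(s),
# then one predictor step.
#
#     scheduler = ScoreSdeVeScheduler()
#     scheduler.set_timesteps(num_inference_steps)
#     scheduler.set_sigmas(num_inference_steps)
#     for t in scheduler.timesteps:
#         for _ in range(scheduler.config.correct_steps):
#             sample = scheduler.step_correct(model(sample, t).sample, sample).prev_sample
#         output = scheduler.step_pred(model(sample, t).sample, t, sample)
#         sample, sample_mean = output.prev_sample, output.prev_sample_mean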
| 304
| 0
|
import math
import unittest
def is_prime(number: int) -> bool:
    '''simple docstring'''
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
class Test(unittest.TestCase):
    # method names need the `test_` prefix so that unittest discovers them
    def test_primes(self):
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))
    def test_not_primes(self):
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0), "Zero doesn't have any positive factors, primes must have exactly two.", )
        self.assertFalse(
            is_prime(1), 'One only has 1 positive factor, primes must have exactly two.', )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))
if __name__ == "__main__":
    unittest.main()
| 361
|
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
lowerCAmelCase = False
class A ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class A ( unittest.TestCase ):
def _A (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _A (self ):
__lowercase= VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
__lowercase= load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
__lowercase= torch.manual_seed(0 )
__lowercase= pipe.dual_guided(
prompt='first prompt' , image=lowerCAmelCase , text_to_image_strength=0.75 , generator=lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCAmelCase )
__lowercase= VersatileDiffusionPipeline.from_pretrained(lowerCAmelCase , torch_dtype=torch.floataa )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
__lowercase= generator.manual_seed(0 )
__lowercase= pipe.dual_guided(
prompt='first prompt' , image=lowerCAmelCase , text_to_image_strength=0.75 , generator=lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def _A (self ):
__lowercase= VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
__lowercase= 'cyberpunk 2077'
__lowercase= load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
__lowercase= torch.manual_seed(0 )
__lowercase= pipe.dual_guided(
prompt=lowerCAmelCase , image=lowerCAmelCase , text_to_image_strength=0.75 , generator=lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='numpy' , ).images
__lowercase= image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase= np.array([0.14_48, 0.16_19, 0.17_41, 0.10_86, 0.11_47, 0.11_28, 0.11_99, 0.11_65, 0.10_01] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
__lowercase= 'A painting of a squirrel eating a burger '
__lowercase= torch.manual_seed(0 )
__lowercase= pipe.text_to_image(
prompt=lowerCAmelCase , generator=lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='numpy' ).images
__lowercase= image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase= np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
__lowercase= pipe.image_variation(lowerCAmelCase , generator=lowerCAmelCase , output_type='numpy' ).images
__lowercase= image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase= np.array([0.30_76, 0.31_23, 0.32_84, 0.37_82, 0.37_70, 0.38_94, 0.42_97, 0.43_31, 0.44_56] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 304
| 0
|
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def _lowerCamelCase( lowercase__ ) -> Tuple:
'''simple docstring'''
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class A ( nn.Module ):
def __init__(self , lowerCAmelCase , lowerCAmelCase ):
super().__init__()
__lowercase= module
__lowercase= nn.Sequential(
nn.Linear(module.in_features , lowerCAmelCase , bias=lowerCAmelCase ) , nn.Linear(lowerCAmelCase , module.out_features , bias=lowerCAmelCase ) , )
__lowercase= (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=lowerCAmelCase )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def _A (self , lowerCAmelCase , *lowerCAmelCase , **lowerCAmelCase ):
return self.module(lowerCAmelCase , *lowerCAmelCase , **lowerCAmelCase ) + self.adapter(lowerCAmelCase )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class A ( unittest.TestCase ):
UpperCamelCase_ : Tuple ='''bigscience/bloom-1b7'''
# Constant values
UpperCamelCase_ : Optional[int] =2.109659552692574
UpperCamelCase_ : Optional[int] ='''Hello my name is'''
UpperCamelCase_ : Optional[Any] =set()
EXPECTED_OUTPUTS.add('''Hello my name is John and I am a professional photographer. I''' )
EXPECTED_OUTPUTS.add('''Hello my name is John.\nI am a friend of your father.\n''' )
EXPECTED_OUTPUTS.add('''Hello my name is John Doe, I am a student at the University''' )
UpperCamelCase_ : str =10
def _A (self ):
# Models and tokenizer
__lowercase= AutoTokenizer.from_pretrained(self.model_name )
class A ( A_ ):
def _A (self ):
super().setUp()
# Models and tokenizer
__lowercase= AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='auto' )
__lowercase= AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase , device_map='auto' )
def _A (self ):
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def _A (self ):
__lowercase= self.model_abit.config
self.assertTrue(hasattr(lowerCAmelCase , 'quantization_config' ) )
__lowercase= config.to_dict()
__lowercase= config.to_diff_dict()
__lowercase= config.to_json_string()
def _A (self ):
from bitsandbytes.nn import Paramsabit
__lowercase= self.model_fpaa.get_memory_footprint()
__lowercase= self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
__lowercase= get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def _A (self ):
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(lowerCAmelCase , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def _A (self ):
__lowercase= self.tokenizer(self.input_text , return_tensors='pt' )
__lowercase= self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCAmelCase ) , self.EXPECTED_OUTPUTS )
def _A (self ):
__lowercase= BitsAndBytesConfig()
__lowercase= True
__lowercase= AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=lowerCAmelCase , device_map='auto' )
__lowercase= self.tokenizer(self.input_text , return_tensors='pt' )
__lowercase= model_abit_from_config.generate(
input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCAmelCase ) , self.EXPECTED_OUTPUTS )
def _A (self ):
with self.assertRaises(lowerCAmelCase ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(lowerCAmelCase )
def _A (self ):
__lowercase= BitsAndBytesConfig()
with self.assertRaises(lowerCAmelCase ):
__lowercase= AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=lowerCAmelCase , load_in_abit=lowerCAmelCase , device_map='auto' , bnb_abit_quant_type='nf4' , )
def _A (self ):
with self.assertRaises(lowerCAmelCase ):
# Tries with `str`
self.model_abit.to('cpu' )
with self.assertRaises(lowerCAmelCase ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(lowerCAmelCase ):
# Tries with a `device`
self.model_abit.to(torch.device('cuda:0' ) )
with self.assertRaises(lowerCAmelCase ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(lowerCAmelCase ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
__lowercase= self.tokenizer(self.input_text , return_tensors='pt' )
__lowercase= self.model_fpaa.to(torch.floataa )
__lowercase= self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=1_0 )
# Check this does not throw an error
__lowercase= self.model_fpaa.to('cpu' )
# Check this does not throw an error
__lowercase= self.model_fpaa.half()
# Check this does not throw an error
__lowercase= self.model_fpaa.float()
def _A (self ):
__lowercase= AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=lowerCAmelCase , device_map='auto' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class A ( unittest.TestCase ):
@classmethod
def _A (cls ):
__lowercase= 't5-small'
__lowercase= 'google/flan-t5-small' # flan-t5 uses dense-act instead of dense-relu-dense
__lowercase= AutoTokenizer.from_pretrained(cls.model_name )
__lowercase= 'Translate in German: Hello, my dog is cute'
def _A (self ):
gc.collect()
torch.cuda.empty_cache()
def _A (self ):
from transformers import TaForConditionalGeneration
__lowercase= TaForConditionalGeneration._keep_in_fpaa_modules
__lowercase= None
# test with `t5-small`
__lowercase= TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase , device_map='auto' )
__lowercase= self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__lowercase= model.generate(**lowerCAmelCase )
# test with `flan-t5-small`
__lowercase= TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=lowerCAmelCase , device_map='auto' )
__lowercase= self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__lowercase= model.generate(**lowerCAmelCase )
__lowercase= modules
def _A (self ):
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
__lowercase= TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase , device_map='auto' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
__lowercase= self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__lowercase= model.generate(**lowerCAmelCase )
# test with `flan-t5-small`
__lowercase= TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=lowerCAmelCase , device_map='auto' )
__lowercase= self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__lowercase= model.generate(**lowerCAmelCase )
class A ( A_ ):
def _A (self ):
super().setUp()
# model_name
__lowercase= 'bigscience/bloom-560m'
__lowercase= 't5-small'
# Different types of model
__lowercase= AutoModel.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase , device_map='auto' )
# Sequence classification model
__lowercase= AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=lowerCAmelCase , device_map='auto' )
# CausalLM model
__lowercase= AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase , device_map='auto' )
# Seq2seq model
__lowercase= AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=lowerCAmelCase , device_map='auto' )
def _A (self ):
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def _A (self ):
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class A ( A_ ):
def _A (self ):
super().setUp()
def _A (self ):
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def _A (self ):
__lowercase= pipeline(
'text-generation' , model=self.model_name , model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
__lowercase= self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class A ( A_ ):
def _A (self ):
super().setUp()
def _A (self ):
__lowercase= AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=lowerCAmelCase , device_map='balanced' )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
__lowercase= self.tokenizer(self.input_text , return_tensors='pt' )
# Second real batch
__lowercase= model_parallel.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=lowerCAmelCase ) , self.EXPECTED_OUTPUTS )
class A ( A_ ):
def _A (self ):
__lowercase= 'facebook/opt-350m'
super().setUp()
def _A (self ):
if version.parse(importlib.metadata.version('bitsandbytes' ) ) < version.parse('0.37.0' ):
return
# Step 1: freeze all parameters
__lowercase= AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
__lowercase= False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
__lowercase= param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(lowerCAmelCase ) ):
__lowercase= LoRALayer(module.q_proj , rank=1_6 )
__lowercase= LoRALayer(module.k_proj , rank=1_6 )
__lowercase= LoRALayer(module.v_proj , rank=1_6 )
# Step 3: dummy batch
__lowercase= self.tokenizer('Test batch ' , return_tensors='pt' ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
__lowercase= model.forward(**lowerCAmelCase )
out.logits.norm().backward()
for module in model.modules():
if isinstance(lowerCAmelCase , lowerCAmelCase ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(lowerCAmelCase , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class A ( A_ ):
UpperCamelCase_ : Optional[Any] ='''gpt2-xl'''
UpperCamelCase_ : List[Any] =3.3191854854152187
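# Hedged sketch of the 4-bit loading pattern these tests exercise (un-obfuscated kwargs
# assumed; `load_in_4bit` and `device_map` are real `from_pretrained` arguments):
#
#     model = AutoModelForCausalLM.from_pretrained(
#         'bigscience/bloom-1b7', load_in_4bit=True, device_map='auto'
#     )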
| 362
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_xmod''': [
'''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XmodConfig''',
'''XmodOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xmod'] = [
'''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XmodForCausalLM''',
'''XmodForMaskedLM''',
'''XmodForMultipleChoice''',
'''XmodForQuestionAnswering''',
'''XmodForSequenceClassification''',
'''XmodForTokenClassification''',
'''XmodModel''',
'''XmodPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 304
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
'''microsoft/resnet-50''': '''https://huggingface.co/microsoft/resnet-50/blob/main/config.json''',
}
class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = 'resnet'
    layer_types = ['basic', 'bottleneck']
    def __init__(self, num_channels=3, embedding_size=64, hidden_sizes=[256, 512, 1024, 2048], depths=[3, 4, 6, 3], layer_type='bottleneck', hidden_act='relu', downsample_in_first_stage=False, out_features=None, out_indices=None, **kwargs, ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f'layer_type={layer_type} is not one of {",".join(self.layer_types)}')
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ['stem'] + [f'stage{idx}' for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')
    @property
    def inputs(self):
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ])
    @property
    def atol_for_validation(self):
        return 1e-3
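# Hedged usage sketch (values are illustrative): `out_features` and `out_indices` are
# aligned against `stage_names`, so requesting stages by name also fills in the indices.
#
#     config = ResNetConfig(out_features=['stage2', 'stage4'])
#     # config.out_features -> ['stage2', 'stage4'], config.out_indices -> [2, 4]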
| 363
|
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    '''simple docstring'''
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7
    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday))
if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = 'will be' if year > datetime.now().year else 'was'
        print(f'Easter in {year} {tense} {gauss_easter(year)}')
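# For reference, the loop above should print these Gregorian Easter Sundays:
# 1994-04-03, 2000-04-23, 2010-04-04, 2021-04-04, 2023-04-09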
| 304
| 0
|
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    sample: torch.FloatTensor
class TransformerTemporalModel(ModelMixin, ConfigMixin):
@register_to_config
def __init__(self , lowerCAmelCase = 1_6 , lowerCAmelCase = 8_8 , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = 1 , lowerCAmelCase = 0.0 , lowerCAmelCase = 3_2 , lowerCAmelCase = None , lowerCAmelCase = False , lowerCAmelCase = None , lowerCAmelCase = "geglu" , lowerCAmelCase = True , lowerCAmelCase = True , ):
super().__init__()
__lowercase= num_attention_heads
__lowercase= attention_head_dim
__lowercase= num_attention_heads * attention_head_dim
__lowercase= in_channels
__lowercase= torch.nn.GroupNorm(num_groups=lowercase_ , num_channels=lowercase_ , eps=1E-6 , affine=lowercase_ )
__lowercase= nn.Linear(lowercase_ , lowercase_ )
# 3. Define transformers blocks
__lowercase= nn.ModuleList(
[
BasicTransformerBlock(
lowercase_ , lowercase_ , lowercase_ , dropout=lowercase_ , cross_attention_dim=lowercase_ , activation_fn=lowercase_ , attention_bias=lowercase_ , double_self_attention=lowercase_ , norm_elementwise_affine=lowercase_ , )
for d in range(lowercase_ )
] )
__lowercase= nn.Linear(lowercase_ , lowercase_ )
def _A (self , lowerCAmelCase , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=1 , lowerCAmelCase=None , lowerCAmelCase = True , ):
__lowercase, __lowercase, __lowercase, __lowercase= hidden_states.shape
__lowercase= batch_frames // num_frames
__lowercase= hidden_states
__lowercase= hidden_states[None, :].reshape(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
__lowercase= hidden_states.permute(0 , 2 , 1 , 3 , 4 )
__lowercase= self.norm(lowercase_ )
__lowercase= hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , lowercase_ , lowercase_ )
__lowercase= self.proj_in(lowercase_ )
# 2. Blocks
for block in self.transformer_blocks:
__lowercase= block(
lowercase_ , encoder_hidden_states=lowercase_ , timestep=lowercase_ , cross_attention_kwargs=lowercase_ , class_labels=lowercase_ , )
# 3. Output
__lowercase= self.proj_out(lowercase_ )
__lowercase= (
hidden_states[None, None, :]
.reshape(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
.permute(0 , 3 , 4 , 1 , 2 )
.contiguous()
)
__lowercase= hidden_states.reshape(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
__lowercase= hidden_states + residual
if not return_dict:
return (output,)
return TransformerTemporalModelOutput(sample=lowercase_ )
| 364
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json''',
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
    model_type = 'blenderbot-small'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__(self , lowerCAmelCase=5_0_2_6_5 , lowerCAmelCase=5_1_2 , lowerCAmelCase=8 , lowerCAmelCase=2_0_4_8 , lowerCAmelCase=1_6 , lowerCAmelCase=8 , lowerCAmelCase=2_0_4_8 , lowerCAmelCase=1_6 , lowerCAmelCase=0.0 , lowerCAmelCase=0.0 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase="gelu" , lowerCAmelCase=5_1_2 , lowerCAmelCase=0.1 , lowerCAmelCase=0.0 , lowerCAmelCase=0.0 , lowerCAmelCase=0.02 , lowerCAmelCase=1 , lowerCAmelCase=False , lowerCAmelCase=0 , lowerCAmelCase=1 , lowerCAmelCase=2 , lowerCAmelCase=2 , **lowerCAmelCase , ):
__lowercase= vocab_size
__lowercase= max_position_embeddings
__lowercase= d_model
__lowercase= encoder_ffn_dim
__lowercase= encoder_layers
__lowercase= encoder_attention_heads
__lowercase= decoder_ffn_dim
__lowercase= decoder_layers
__lowercase= decoder_attention_heads
__lowercase= dropout
__lowercase= attention_dropout
__lowercase= activation_dropout
__lowercase= activation_function
__lowercase= init_std
__lowercase= encoder_layerdrop
__lowercase= decoder_layerdrop
__lowercase= use_cache
__lowercase= encoder_layers
__lowercase= scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , is_encoder_decoder=lowerCAmelCase , decoder_start_token_id=lowerCAmelCase , forced_eos_token_id=lowerCAmelCase , **lowerCAmelCase , )
class BlenderbotSmallOnnxConfig(OnnxSeqaSeqConfigWithPast):
@property
def _A (self ):
if self.task in ["default", "seq2seq-lm"]:
__lowercase= OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
__lowercase= {0: 'batch'}
__lowercase= {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
__lowercase= {0: 'batch', 1: 'decoder_sequence'}
__lowercase= {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase , direction='inputs' )
elif self.task == "causal-lm":
# TODO: figure this case out.
__lowercase= OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
__lowercase, __lowercase= self.num_layers
for i in range(lowerCAmelCase ):
__lowercase= {0: 'batch', 2: 'past_sequence + sequence'}
__lowercase= {0: 'batch', 2: 'past_sequence + sequence'}
else:
__lowercase= OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
] )
return common_inputs
@property
def _A (self ):
if self.task in ["default", "seq2seq-lm"]:
__lowercase= super().outputs
else:
__lowercase= super(lowerCAmelCase , self ).outputs
if self.use_past:
__lowercase, __lowercase= self.num_layers
for i in range(lowerCAmelCase ):
__lowercase= {0: 'batch', 2: 'past_sequence + sequence'}
__lowercase= {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def _A (self , lowerCAmelCase , lowerCAmelCase = -1 , lowerCAmelCase = -1 , lowerCAmelCase = False , lowerCAmelCase = None , ):
__lowercase= self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Generate decoder inputs
__lowercase= seq_length if not self.use_past else 1
__lowercase= self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
__lowercase= {f'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
__lowercase= dict(**lowerCAmelCase , **lowerCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
__lowercase, __lowercase= common_inputs['input_ids'].shape
__lowercase= common_inputs['decoder_input_ids'].shape[1]
__lowercase, __lowercase= self.num_attention_heads
__lowercase= (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__lowercase= decoder_seq_length + 3
__lowercase= (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__lowercase= torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(lowerCAmelCase , lowerCAmelCase )] , dim=1 )
__lowercase= []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
__lowercase, __lowercase= self.num_layers
__lowercase= min(lowerCAmelCase , lowerCAmelCase )
__lowercase= max(lowerCAmelCase , lowerCAmelCase ) - min_num_layers
__lowercase= 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(lowerCAmelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowerCAmelCase ),
torch.zeros(lowerCAmelCase ),
torch.zeros(lowerCAmelCase ),
torch.zeros(lowerCAmelCase ),
) )
# TODO: test this.
__lowercase= encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(lowerCAmelCase , lowerCAmelCase ):
common_inputs["past_key_values"].append((torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase )) )
return common_inputs
def _A (self , lowerCAmelCase , lowerCAmelCase = -1 , lowerCAmelCase = -1 , lowerCAmelCase = False , lowerCAmelCase = None , ):
__lowercase= self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
__lowercase, __lowercase= common_inputs['input_ids'].shape
# Not using the same length for past_key_values
__lowercase= seqlen + 2
__lowercase, __lowercase= self.num_layers
__lowercase, __lowercase= self.num_attention_heads
__lowercase= (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__lowercase= common_inputs['attention_mask'].dtype
__lowercase= torch.cat(
[common_inputs['attention_mask'], torch.ones(lowerCAmelCase , lowerCAmelCase , dtype=lowerCAmelCase )] , dim=1 )
__lowercase= [
(torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase )) for _ in range(lowerCAmelCase )
]
return common_inputs
def _A (self , lowerCAmelCase , lowerCAmelCase = -1 , lowerCAmelCase = -1 , lowerCAmelCase = False , lowerCAmelCase = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__lowercase= compute_effective_axis_dimension(
lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__lowercase= tokenizer.num_special_tokens_to_add(lowerCAmelCase )
__lowercase= compute_effective_axis_dimension(
lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCAmelCase )
# Generate dummy inputs according to compute batch and sequence
__lowercase= [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
__lowercase= dict(tokenizer(lowerCAmelCase , return_tensors=lowerCAmelCase ) )
return common_inputs
def _A (self , lowerCAmelCase , lowerCAmelCase = -1 , lowerCAmelCase = -1 , lowerCAmelCase = False , lowerCAmelCase = None , ):
if self.task in ["default", "seq2seq-lm"]:
__lowercase= self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase )
elif self.task == "causal-lm":
__lowercase= self._generate_dummy_inputs_for_causal_lm(
lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase )
else:
__lowercase= self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase )
return common_inputs
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
if self.task in ["default", "seq2seq-lm"]:
__lowercase= super()._flatten_past_key_values_(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
else:
__lowercase= super(lowerCAmelCase , self )._flatten_past_key_values_(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
| 304
| 0
|
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class A :
"""simple docstring"""
def __init__(self , lowerCAmelCase , lowerCAmelCase=3 , lowerCAmelCase=3_2 , lowerCAmelCase=3 , lowerCAmelCase=1_0 , lowerCAmelCase=[8, 1_6, 3_2, 6_4] , lowerCAmelCase=[1, 1, 2, 1] , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase="relu" , lowerCAmelCase=3 , lowerCAmelCase=None , lowerCAmelCase=["stage2", "stage3", "stage4"] , lowerCAmelCase=[2, 3, 4] , lowerCAmelCase=1 , ):
__lowercase= parent
__lowercase= batch_size
__lowercase= image_size
__lowercase= num_channels
__lowercase= embeddings_size
__lowercase= hidden_sizes
__lowercase= depths
__lowercase= is_training
__lowercase= use_labels
__lowercase= hidden_act
__lowercase= num_labels
__lowercase= scope
__lowercase= len(_a )
__lowercase= out_features
__lowercase= out_indices
__lowercase= num_groups
def _A (self ):
__lowercase= floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase= None
if self.use_labels:
__lowercase= ids_tensor([self.batch_size] , self.num_labels )
__lowercase= self.get_config()
return config, pixel_values, labels
def _A (self ):
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= BitModel(config=_a )
model.to(_a )
model.eval()
__lowercase= model(_a )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= self.num_labels
__lowercase= BitForImageClassification(_a )
model.to(_a )
model.eval()
__lowercase= model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= BitBackbone(config=_a )
model.to(_a )
model.eval()
__lowercase= model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
__lowercase= None
__lowercase= BitBackbone(config=_a )
model.to(_a )
model.eval()
__lowercase= model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def _A (self ):
__lowercase= self.prepare_config_and_inputs()
__lowercase= config_and_inputs
__lowercase= {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class A ( __lowercase , __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : List[str] =(BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
UpperCamelCase_ : Optional[Any] =(
{"feature-extraction": BitModel, "image-classification": BitForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase_ : Any =False
UpperCamelCase_ : Any =False
UpperCamelCase_ : Any =False
UpperCamelCase_ : List[Any] =False
UpperCamelCase_ : str =False
def _A (self ):
__lowercase= BitModelTester(self )
__lowercase= ConfigTester(self , config_class=_a , has_text_modality=_a )
def _A (self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _A (self ):
return
@unittest.skip(reason='Bit does not output attentions' )
def _A (self ):
pass
@unittest.skip(reason='Bit does not use inputs_embeds' )
def _A (self ):
pass
@unittest.skip(reason='Bit does not support input and output embeddings' )
def _A (self ):
pass
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase= model_class(_a )
__lowercase= inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase= [*signature.parameters.keys()]
__lowercase= ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _a )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_a )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase= model_class(config=_a )
for name, module in model.named_modules():
if isinstance(_a , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
def _A (self ):
def check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
__lowercase= model(**self._prepare_for_class(_a , _a ) )
__lowercase= outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__lowercase= self.model_tester.num_stages
self.assertEqual(len(_a ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__lowercase= self.model_tester.prepare_config_and_inputs_for_common()
__lowercase= ['''preactivation''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
__lowercase= layer_type
__lowercase= True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase= True
check_hidden_states_output(_a , _a , _a )
@unittest.skip(reason='Bit does not use feedforward chunking' )
def _A (self ):
pass
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def _A (self ):
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase= BitModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def _lowerCamelCase( ) -> Dict:
'''simple docstring'''
__lowercase= Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class A ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _A (self ):
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def _A (self ):
__lowercase= BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_a )
__lowercase= self.default_image_processor
__lowercase= prepare_img()
__lowercase= image_processor(images=_a , return_tensors='pt' ).to(_a )
# forward pass
with torch.no_grad():
__lowercase= model(**_a )
# verify the logits
__lowercase= torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , _a )
__lowercase= torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1E-4 ) )
@require_torch
class A ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : str =(BitBackbone,) if is_torch_available() else ()
UpperCamelCase_ : Union[str, Any] =BitConfig
UpperCamelCase_ : List[Any] =False
def _A (self ):
__lowercase= BitModelTester(self )
| 365
|
from math import factorial, radians
def sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    '''simple docstring'''
    # Simplify the angle to be between -360 and 360 degrees
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result, rounded_values_count)
if __name__ == "__main__":
    __import__('doctest').testmod()
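# Example values for the Taylor approximation above (exact to the default 10 decimals,
# since the series converges very quickly for small angles):
#     sin(0.0)  -> 0.0
#     sin(30.0) -> 0.5
#     sin(90.0) -> 1.0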
| 304
| 0
|
def get_set_bits_count(number: int) -> int:
    '''simple docstring'''
    if number < 0:
        raise ValueError('Input value must be a positive integer')
    elif not isinstance(number, int):
        raise TypeError("Input value must be a 'int' type")
    return bin(number).count('1')
if __name__ == "__main__":
    import doctest
    doctest.testmod()
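# Example: 25 is 0b11001, so get_set_bits_count(25) returns 3.
# An equivalent Brian Kernighan-style count, shown as an alternative sketch:
#
#     def count_bits_kernighan(number: int) -> int:
#         count = 0
#         while number:
#             number &= number - 1  # clear the lowest set bit
#             count += 1
#         return count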
| 366
|
lowerCAmelCase = [
9_9_9,
8_0_0,
7_9_9,
6_0_0,
5_9_9,
5_0_0,
4_0_0,
3_9_9,
3_7_7,
3_5_5,
3_3_3,
3_1_1,
2_8_8,
2_6_6,
2_4_4,
2_2_2,
2_0_0,
1_9_9,
1_7_7,
1_5_5,
1_3_3,
1_1_1,
8_8,
6_6,
4_4,
2_2,
0,
]
lowerCAmelCase = [
9_9_9,
9_7_6,
9_5_2,
9_2_8,
9_0_5,
8_8_2,
8_5_8,
8_5_7,
8_1_0,
7_6_2,
7_1_5,
7_1_4,
5_7_2,
4_2_9,
4_2_8,
2_8_6,
2_8_5,
2_3_8,
1_9_0,
1_4_3,
1_4_2,
1_1_8,
9_5,
7_1,
4_7,
2_4,
0,
]
lowerCAmelCase = [
9_9_9,
9_8_8,
9_7_7,
9_6_6,
9_5_5,
9_4_4,
9_3_3,
9_2_2,
9_1_1,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_5_0,
3_0_0,
2_9_9,
2_6_6,
2_3_3,
2_0_0,
1_9_9,
1_7_9,
1_5_9,
1_4_0,
1_2_0,
1_0_0,
9_9,
8_8,
7_7,
6_6,
5_5,
4_4,
3_3,
2_2,
1_1,
0,
]
lowerCAmelCase = [
9_9_9,
9_9_5,
9_9_2,
9_8_9,
9_8_5,
9_8_1,
9_7_8,
9_7_5,
9_7_1,
9_6_7,
9_6_4,
9_6_1,
9_5_7,
9_5_6,
9_5_1,
9_4_7,
9_4_2,
9_3_7,
9_3_3,
9_2_8,
9_2_3,
9_1_9,
9_1_4,
9_1_3,
9_0_8,
9_0_3,
8_9_7,
8_9_2,
8_8_7,
8_8_1,
8_7_6,
8_7_1,
8_7_0,
8_6_4,
8_5_8,
8_5_2,
8_4_6,
8_4_0,
8_3_4,
8_2_8,
8_2_7,
8_2_0,
8_1_3,
8_0_6,
7_9_9,
7_9_2,
7_8_5,
7_8_4,
7_7_7,
7_7_0,
7_6_3,
7_5_6,
7_4_9,
7_4_2,
7_4_1,
7_3_3,
7_2_4,
7_1_6,
7_0_7,
6_9_9,
6_9_8,
6_8_8,
6_7_7,
6_6_6,
6_5_6,
6_5_5,
6_4_5,
6_3_4,
6_2_3,
6_1_3,
6_1_2,
5_9_8,
5_8_4,
5_7_0,
5_6_9,
5_5_5,
5_4_1,
5_2_7,
5_2_6,
5_0_5,
4_8_4,
4_8_3,
4_6_2,
4_4_0,
4_3_9,
3_9_6,
3_9_5,
3_5_2,
3_5_1,
3_0_8,
3_0_7,
2_6_4,
2_6_3,
2_2_0,
2_1_9,
1_7_6,
1_3_2,
8_8,
4_4,
0,
]
lowerCAmelCase = [
9_9_9,
9_9_7,
9_9_5,
9_9_2,
9_9_0,
9_8_8,
9_8_6,
9_8_4,
9_8_1,
9_7_9,
9_7_7,
9_7_5,
9_7_2,
9_7_0,
9_6_8,
9_6_6,
9_6_4,
9_6_1,
9_5_9,
9_5_7,
9_5_6,
9_5_4,
9_5_1,
9_4_9,
9_4_6,
9_4_4,
9_4_1,
9_3_9,
9_3_6,
9_3_4,
9_3_1,
9_2_9,
9_2_6,
9_2_4,
9_2_1,
9_1_9,
9_1_6,
9_1_4,
9_1_3,
9_1_0,
9_0_7,
9_0_5,
9_0_2,
8_9_9,
8_9_6,
8_9_3,
8_9_1,
8_8_8,
8_8_5,
8_8_2,
8_7_9,
8_7_7,
8_7_4,
8_7_1,
8_7_0,
8_6_7,
8_6_4,
8_6_1,
8_5_8,
8_5_5,
8_5_2,
8_4_9,
8_4_6,
8_4_3,
8_4_0,
8_3_7,
8_3_4,
8_3_1,
8_2_8,
8_2_7,
8_2_4,
8_2_1,
8_1_7,
8_1_4,
8_1_1,
8_0_8,
8_0_4,
8_0_1,
7_9_8,
7_9_5,
7_9_1,
7_8_8,
7_8_5,
7_8_4,
7_8_0,
7_7_7,
7_7_4,
7_7_0,
7_6_6,
7_6_3,
7_6_0,
7_5_6,
7_5_2,
7_4_9,
7_4_6,
7_4_2,
7_4_1,
7_3_7,
7_3_3,
7_3_0,
7_2_6,
7_2_2,
7_1_8,
7_1_4,
7_1_0,
7_0_7,
7_0_3,
6_9_9,
6_9_8,
6_9_4,
6_9_0,
6_8_5,
6_8_1,
6_7_7,
6_7_3,
6_6_9,
6_6_4,
6_6_0,
6_5_6,
6_5_5,
6_5_0,
6_4_6,
6_4_1,
6_3_6,
6_3_2,
6_2_7,
6_2_2,
6_1_8,
6_1_3,
6_1_2,
6_0_7,
6_0_2,
5_9_6,
5_9_1,
5_8_6,
5_8_0,
5_7_5,
5_7_0,
5_6_9,
5_6_3,
5_5_7,
5_5_1,
5_4_5,
5_3_9,
5_3_3,
5_2_7,
5_2_6,
5_1_9,
5_1_2,
5_0_5,
4_9_8,
4_9_1,
4_8_4,
4_8_3,
4_7_4,
4_6_6,
4_5_7,
4_4_9,
4_4_0,
4_3_9,
4_2_8,
4_1_8,
4_0_7,
3_9_6,
3_9_5,
3_8_1,
3_6_6,
3_5_2,
3_5_1,
3_3_0,
3_0_8,
3_0_7,
2_8_6,
2_6_4,
2_6_3,
2_4_2,
2_2_0,
2_1_9,
1_7_6,
1_7_5,
1_3_2,
1_3_1,
8_8,
4_4,
0,
]
lowerCAmelCase = [
9_9_9,
9_9_1,
9_8_2,
9_7_4,
9_6_6,
9_5_8,
9_5_0,
9_4_1,
9_3_3,
9_2_5,
9_1_6,
9_0_8,
9_0_0,
8_9_9,
8_7_4,
8_5_0,
8_2_5,
8_0_0,
7_9_9,
7_0_0,
6_0_0,
5_0_0,
4_0_0,
3_0_0,
2_0_0,
1_0_0,
0,
]
lowerCAmelCase = [
9_9_9,
9_9_2,
9_8_5,
9_7_8,
9_7_1,
9_6_4,
9_5_7,
9_4_9,
9_4_2,
9_3_5,
9_2_8,
9_2_1,
9_1_4,
9_0_7,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_0_0,
2_9_9,
2_0_0,
1_9_9,
1_0_0,
9_9,
0,
]
lowerCAmelCase = [
9_9_9,
9_9_6,
9_9_2,
9_8_9,
9_8_5,
9_8_2,
9_7_9,
9_7_5,
9_7_2,
9_6_8,
9_6_5,
9_6_1,
9_5_8,
9_5_5,
9_5_1,
9_4_8,
9_4_4,
9_4_1,
9_3_8,
9_3_4,
9_3_1,
9_2_7,
9_2_4,
9_2_0,
9_1_7,
9_1_4,
9_1_0,
9_0_7,
9_0_3,
9_0_0,
8_9_9,
8_9_1,
8_8_4,
8_7_6,
8_6_9,
8_6_1,
8_5_3,
8_4_6,
8_3_8,
8_3_0,
8_2_3,
8_1_5,
8_0_8,
8_0_0,
7_9_9,
7_8_8,
7_7_7,
7_6_6,
7_5_5,
7_4_4,
7_3_3,
7_2_2,
7_1_1,
7_0_0,
6_9_9,
6_8_8,
6_7_7,
6_6_6,
6_5_5,
6_4_4,
6_3_3,
6_2_2,
6_1_1,
6_0_0,
5_9_9,
5_8_5,
5_7_1,
5_5_7,
5_4_2,
5_2_8,
5_1_4,
5_0_0,
4_9_9,
4_8_5,
4_7_1,
4_5_7,
4_4_2,
4_2_8,
4_1_4,
4_0_0,
3_9_9,
3_7_9,
3_5_9,
3_4_0,
3_2_0,
3_0_0,
2_9_9,
2_7_9,
2_5_9,
2_4_0,
2_2_0,
2_0_0,
1_9_9,
1_6_6,
1_3_3,
1_0_0,
9_9,
6_6,
3_3,
0,
]
| 304
| 0
|
"""simple docstring"""
lowerCAmelCase = [
[0, 1_6, 1_3, 0, 0, 0],
[0, 0, 1_0, 1_2, 0, 0],
[0, 4, 0, 0, 1_4, 0],
[0, 0, 9, 0, 0, 2_0],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def bfs(graph, s, t, parent) -> bool:
    '''simple docstring'''
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]
def mincut(graph , source , sink ) -> list:
    '''Saturate the network with BFS augmenting paths, then return the minimum-cut edges.'''
    parent = [-1] * (len(graph ))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original capacities, copy.
    while bfs(graph , source , sink , parent ):
        path_flow = float('Inf' )
        s = sink
        while s != source:
            # Find the minimum residual capacity along the selected path
            path_flow = min(path_flow , graph[parent[s]][s] )
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph ) ):
        for j in range(len(graph[0] ) ):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j) )
    return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
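# Worked example (editor's addition, not part of the original snippet): `mincut`
# saturates the network via BFS augmenting paths (Edmonds-Karp) and reports the
# edges whose capacity was fully used. Summing the original capacities of the
# returned cut edges gives the cut's capacity, which equals the maximum flow by
# the max-flow/min-cut theorem. Running on a copy keeps `test_graph` intact.
from copy import deepcopy

def cut_capacity(graph, cut_edges) -> int:
    # Sum the original capacities of the edges crossing the cut.
    return sum(graph[u][v] for u, v in cut_edges)

if __name__ == "__main__":
    residual = deepcopy(test_graph)
    cut_edges = mincut(residual, source=0, sink=5)
    print(cut_edges)                            # e.g. [(1, 3), (4, 3), (4, 5)]
    print(cut_capacity(test_graph, cut_edges))  # capacity of the cut (23 here)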
| 367
|
from __future__ import annotations
import numpy as np
def relu(vector ) -> np.ndarray:
    '''Element-wise rectified linear unit: max(0, x).'''
    return np.maximum(0 , vector )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
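# A closely related activation for comparison (editor's addition): leaky ReLU
# keeps a small slope for negative inputs instead of zeroing them out.
def leaky_relu(vector, alpha: float = 0.01) -> np.ndarray:
    vector = np.asarray(vector, dtype=float)
    return np.where(vector > 0, vector, alpha * vector)

if __name__ == "__main__":
    print(leaky_relu([-1, 0, 5]))  # --> [-0.01  0.    5.  ]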
| 304
| 0
|
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule():
'''simple docstring'''
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
    mock = '__test_patch_submodule_mock__'
    with patch_submodule(_test_patching , 'os.path.join' , mock ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def test_patch_submodule_builtin():
'''simple docstring'''
assert _test_patching.open is open
    mock = '__test_patch_submodule_builtin_mock__'
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching , 'open' , mock ):
        assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
assert _test_patching.open is open
def test_patch_submodule_missing():
    '''simple docstring'''
    mock = '__test_patch_submodule_missing_mock__'
    with patch_submodule(_test_patching , 'pandas.read_csv' , mock ):
pass
def test_patch_submodule_missing_builtin():
    '''simple docstring'''
    mock = '__test_patch_submodule_missing_builtin_mock__'
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching , 'len' , None ) is None
    with patch_submodule(_test_patching , 'len' , mock ):
assert _test_patching.len is mock
assert _test_patching.len is len
def test_patch_submodule_start_and_stop():
    '''simple docstring'''
    mock = '__test_patch_submodule_start_and_stop_mock__'
    patch = patch_submodule(_test_patching , 'open' , mock )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def test_patch_submodule_successive():
'''simple docstring'''
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
    mock_join = '__test_patch_submodule_successive_join__'
    mock_dirname = '__test_patch_submodule_successive_dirname__'
    mock_rename = '__test_patch_submodule_successive_rename__'
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
    with patch_submodule(_test_patching , 'os.path.join' , mock_join ):
        with patch_submodule(_test_patching , 'os.rename' , mock_rename ):
            with patch_submodule(_test_patching , 'os.path.dirname' , mock_dirname ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
    with patch_submodule(_test_patching , 'os.rename' , mock_rename ):
        with patch_submodule(_test_patching , 'os.path.join' , mock_join ):
            with patch_submodule(_test_patching , 'os.path.dirname' , mock_dirname ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def test_patch_submodule_doesnt_exist():
    '''simple docstring'''
    mock = '__test_patch_submodule_doesnt_exist_mock__'
    with patch_submodule(_test_patching , '__module_that_doesn_exist__.__attribute_that_doesn_exist__' , mock ):
        pass
    with patch_submodule(_test_patching , 'os.__attribute_that_doesn_exist__' , mock ):
        pass
pass
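# For intuition, the core mechanism behind `patch_submodule` can be sketched as a
# tiny context manager (editor's illustration, not the real implementation in
# `datasets.utils.patching`, which additionally resolves dotted paths and wraps
# intermediate modules in _PatchedModuleObj):
from contextlib import contextmanager

@contextmanager
def simple_patch(module, attr_name, new_value):
    sentinel = object()
    old_value = getattr(module, attr_name, sentinel)
    setattr(module, attr_name, new_value)
    try:
        yield
    finally:
        if old_value is sentinel:
            delattr(module, attr_name)  # the attribute did not exist before
        else:
            setattr(module, attr_name, old_value)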
| 368
|
def solution(power: int = 1_0_0_0 ) -> int:
    '''Return the sum of the decimal digits of 2**power (Project Euler 16 uses power=1000).'''
    num = 2**power
    string_num = str(num )
    list_num = list(string_num )
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i )
    return sum_of_num
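# Equivalent one-liner (editor's note): Python's arbitrary-precision integers make
# str(2**power) exact for any power, so the digit walk can be a single generator.
def solution_oneliner(power: int = 1_0_0_0) -> int:
    return sum(int(digit) for digit in str(2**power))

assert solution_oneliner(1_5) == 2_6  # 2**15 = 32768 -> 3+2+7+6+8 = 26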
if __name__ == "__main__":
    power = int(input('''Enter the power of 2: ''').strip())
    print('''2 ^ ''', power, ''' = ''', 2**power)
    result = solution(power)
    print('''Sum of the digits is: ''', result)
| 304
| 0
|
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece_bpe.model''')
class A ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self ):
        super().setUp()
        vocab = ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''']
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}
        self.monolingual_vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['monolingual_vocab_file'] )
        with open(self.monolingual_vocab_file , 'w' , encoding='utf-8' ) as fp:
            for token in vocab_tokens:
                fp.write(f'{token} {vocab_tokens[token]}\n' )
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB , self.monolingual_vocab_file , **self.special_tokens_map )
        tokenizer.save_pretrained(self.tmpdirname )
    def get_tokenizer(self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return BartphoTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts(self , tokenizer ):
        input_text = '''This is a là test'''
        output_text = '''This is a<unk><unk> test'''
        return input_text, output_text
    def test_full_tokenizer(self ):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB , self.monolingual_vocab_file , **self.special_tokens_map )
        text = '''This is a là test'''
        bpe_tokens = '''▁This ▁is ▁a ▁l à ▁t est'''.split()
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
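# Editor's note on the expected ids above: the five in-vocab pieces written in
# setUp occupy ids 4-8 (after the special tokens), while every out-of-vocab piece
# (here 'l' and 'à', plus the explicitly appended unk token) collapses to id 3,
# the <unk> id in this fixture vocabulary.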
| 369
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds( train_file , eval_file , test_file , tokenizer , label_column_id , max_seq_length = None , ):
    '''Build train/val/test tf.data datasets and a label->id map from CSV files.'''
    files = {}
    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]
    ds = datasets.load_dataset('csv' , data_files=files )
    features_name = list(ds[list(files.keys() )[0]].features.keys() )
    label_name = features_name.pop(label_column_id )
    label_list = list(set(ds[list(files.keys() )[0]][label_name] ) )
    labelaid = {label: i for i, label in enumerate(label_list )}
    input_names = tokenizer.model_input_names
    transformed_ds = {}
    if len(features_name ) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example : tokenizer.batch_encode_plus(
                    example[features_name[0]] , truncation=True , max_length=max_seq_length , padding='max_length' ) , batched=True , )
    elif len(features_name ) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example : tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]) , truncation=True , max_length=max_seq_length , padding='max_length' , ) , batched=True , )
    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = labelaid[ex[label_name]]
            yield (d, label)
    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = labelaid[ex[label_name]]
            yield (d, label)
    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = labelaid[ex[label_name]]
            yield (d, label)
    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train , ({k: tf.int64 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )
    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val , ({k: tf.int64 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )
    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test , ({k: tf.int64 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
        if datasets.Split.TEST in transformed_ds
        else None
    )
    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
    return train_ds, val_ds, test_ds, labelaid
logger = logging.getLogger(__name__)
@dataclass
class A :
UpperCamelCase_ : int =field(metadata={'''help''': '''Which column contains the label'''} )
UpperCamelCase_ : str =field(default=A_ , metadata={'''help''': '''The path of the training file'''} )
UpperCamelCase_ : Optional[str] =field(default=A_ , metadata={'''help''': '''The path of the development file'''} )
UpperCamelCase_ : Optional[str] =field(default=A_ , metadata={'''help''': '''The path of the test file'''} )
UpperCamelCase_ : int =field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
UpperCamelCase_ : bool =field(
default=A_ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
@dataclass
class A :
UpperCamelCase_ : str =field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
UpperCamelCase_ : Optional[str] =field(
default=A_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
UpperCamelCase_ : Optional[str] =field(
default=A_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
UpperCamelCase_ : bool =field(default=A_ , metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
UpperCamelCase_ : Optional[str] =field(
default=A_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
def main() -> None:
    '''Fine-tune a TF sequence-classification model on CSV data.'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
logger.info(
        F'n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '
        F'16-bits training: {training_args.fp16}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=tokenizer , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(label2id ) , label2id=label2id , id2label={id: label for label, id in label2id.items()} , finetuning_task='text-classification' , cache_dir=model_args.cache_dir , )
    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path , from_pt=bool('.bin' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
    def compute_metrics(p: EvalPrediction ) -> Dict:
        preds = np.argmax(p.predictions , axis=1 )
        return {"acc": (preds == p.label_ids).mean()}
    # Initialize our Trainer
    trainer = TFTrainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , 'eval_results.txt' )
        with open(output_eval_file , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(F' {key} = {value}' )
writer.write(F'{key} = {value}\n' )
            results.update(result )
return results
if __name__ == "__main__":
main()
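# Example invocation (editor's illustration; the flag names follow the dataclass
# fields defined above, while the script filename, paths, and model name are
# placeholders):
#
#   python run_tf_text_classification.py \
#     --model_name_or_path bert-base-uncased \
#     --train_file train.csv --dev_file dev.csv --test_file test.csv \
#     --label_column_id 0 --max_seq_length 128 \
#     --output_dir ./model --do_train --do_eval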
| 304
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'''configuration_vit_msn''': ['''VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMSNConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_vit_msn'''] = [
'''VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMSNModel''',
'''ViTMSNForImageClassification''',
'''ViTMSNPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
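# The last line wires up lazy loading: importing the package only materializes
# `_import_structure`, and the heavy torch modules load on first attribute
# access. A stripped-down sketch of the idea (editor's illustration; the real
# _LazyModule in transformers.utils also handles __dir__, reload, and pickling):
import importlib
import types

class TinyLazyModule(types.ModuleType):
    def __init__(self, name, attr_to_module):
        super().__init__(name)
        self._attr_to_module = attr_to_module  # e.g. {"ViTMSNModel": ".modeling_vit_msn"}

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        module = importlib.import_module(self._attr_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache the result: __getattr__ only fires on misses
        return value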
| 370
|
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A ( A_ ):
def _A (self ):
__lowercase= self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCAmelCase , 'embed_dim' ) )
self.parent.assertTrue(hasattr(lowerCAmelCase , 'num_heads' ) )
class A :
def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=6_4 , lowerCAmelCase=3 , lowerCAmelCase=[1_6, 4_8, 9_6] , lowerCAmelCase=[1, 3, 6] , lowerCAmelCase=[1, 2, 1_0] , lowerCAmelCase=[7, 3, 3] , lowerCAmelCase=[4, 2, 2] , lowerCAmelCase=[2, 1, 1] , lowerCAmelCase=[2, 2, 2] , lowerCAmelCase=[False, False, True] , lowerCAmelCase=[0.0, 0.0, 0.0] , lowerCAmelCase=0.02 , lowerCAmelCase=1E-12 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=2 , ):
__lowercase= parent
__lowercase= batch_size
__lowercase= image_size
__lowercase= patch_sizes
__lowercase= patch_stride
__lowercase= patch_padding
__lowercase= is_training
__lowercase= use_labels
__lowercase= num_labels
__lowercase= num_channels
__lowercase= embed_dim
__lowercase= num_heads
__lowercase= stride_kv
__lowercase= depth
__lowercase= cls_token
__lowercase= attention_drop_rate
__lowercase= initializer_range
__lowercase= layer_norm_eps
def _A (self ):
__lowercase= floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase= None
if self.use_labels:
__lowercase= ids_tensor([self.batch_size] , self.num_labels )
__lowercase= self.get_config()
return config, pixel_values, labels
def _A (self ):
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= CvtModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase )
__lowercase= (self.image_size, self.image_size)
__lowercase, __lowercase= image_size[0], image_size[1]
for i in range(len(self.depth ) ):
__lowercase= floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
__lowercase= floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= self.num_labels
__lowercase= CvtForImageClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _A (self ):
__lowercase= self.prepare_config_and_inputs()
__lowercase, __lowercase, __lowercase= config_and_inputs
__lowercase= {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class A ( A_ , A_ , unittest.TestCase ):
UpperCamelCase_ : Optional[int] =(CvtModel, CvtForImageClassification) if is_torch_available() else ()
UpperCamelCase_ : List[str] =(
{'''feature-extraction''': CvtModel, '''image-classification''': CvtForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase_ : str =False
UpperCamelCase_ : List[Any] =False
UpperCamelCase_ : Any =False
UpperCamelCase_ : Union[str, Any] =False
UpperCamelCase_ : Tuple =False
def _A (self ):
__lowercase= CvtModelTester(self )
__lowercase= ConfigTester(self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase , hidden_size=3_7 )
def _A (self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _A (self ):
return
@unittest.skip(reason='Cvt does not output attentions' )
def _A (self ):
pass
@unittest.skip(reason='Cvt does not use inputs_embeds' )
def _A (self ):
pass
@unittest.skip(reason='Cvt does not support input and output embeddings' )
def _A (self ):
pass
def _A (self ):
__lowercase, __lowercase= self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase= model_class(lowerCAmelCase )
__lowercase= inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase= [*signature.parameters.keys()]
__lowercase= ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def _A (self ):
def check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
with torch.no_grad():
__lowercase= model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
__lowercase= outputs.hidden_states
__lowercase= len(self.model_tester.depth )
self.assertEqual(len(lowerCAmelCase ) , lowerCAmelCase )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
__lowercase, __lowercase= self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase= True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase= True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _A (self ):
pass
@slow
def _A (self ):
for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase= CvtModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
def _lowerCamelCase( ) -> Optional[int]:
'''simple docstring'''
__lowercase= Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class A ( unittest.TestCase ):
@cached_property
def _A (self ):
return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def _A (self ):
__lowercase= CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowerCAmelCase )
__lowercase= self.default_image_processor
__lowercase= prepare_img()
__lowercase= image_processor(images=lowerCAmelCase , return_tensors='pt' ).to(lowerCAmelCase )
# forward pass
with torch.no_grad():
__lowercase= model(**lowerCAmelCase )
# verify the logits
__lowercase= torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase )
__lowercase= torch.tensor([0.92_85, 0.90_15, -0.31_50] ).to(lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1E-4 ) )
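# The expected spatial sizes computed in create_and_check_model above follow the
# standard convolution arithmetic; factored out for clarity (editor's sketch):
from math import floor

def conv_output_size(size: int, kernel: int, stride: int, padding: int) -> int:
    # floor((size + 2*padding - kernel) / stride) + 1
    return floor((size + 2 * padding - kernel) / stride) + 1

# First CvT stage with the tester's defaults: 64px input, 7x7 patches, stride 4,
# padding 2 -> a 16x16 feature map, i.e. image_size // 4.
assert conv_output_size(6_4, kernel=7, stride=4, padding=2) == 1_6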
| 304
| 0
|
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args ) -> None:
    '''Binarize the mask scores of a fine-pruned model and save the pruned weights.'''
    pruning_method = args.pruning_method
    threshold = args.threshold
    model_name_or_path = args.model_name_or_path.rstrip('/' )
    target_model_path = args.target_model_path
    print(F'Load fine-pruned model from {model_name_or_path}' )
    model = torch.load(os.path.join(model_name_or_path , 'pytorch_model.bin' ) )
    pruned_model = {}
    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(F'Copied layer {name}' )
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(F'Copied layer {name}' )
        elif "bias" in name:
            pruned_model[name] = tensor
            print(F'Copied layer {name}' )
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor , threshold=threshold )
                pruned_model[name] = tensor * mask
                print(F'Pruned layer {name}' )
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[F'{prefix_}mask_scores']
                mask = TopKBinarizer.apply(scores , threshold )
                pruned_model[name] = tensor * mask
                print(F'Pruned layer {name}' )
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[F'{prefix_}mask_scores']
                mask = ThresholdBinarizer.apply(scores , threshold , True )
                pruned_model[name] = tensor * mask
                print(F'Pruned layer {name}' )
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[F'{prefix_}mask_scores']
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores )
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0 , max=1.0 )
                pruned_model[name] = tensor * mask
                print(F'Pruned layer {name}' )
            else:
                raise ValueError('Unknown pruning method' )
    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path ) , F'bertarized_{os.path.basename(model_name_or_path )}' )
    if not os.path.isdir(target_model_path ):
        shutil.copytree(model_name_or_path , target_model_path )
        print(F'\nCreated folder {target_model_path}' )
    torch.save(pruned_model , os.path.join(target_model_path , 'pytorch_model.bin' ) )
    print('\nPruned model saved! See you later!' )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'''--pruning_method''',
choices=['''l0''', '''magnitude''', '''topK''', '''sigmoied_threshold'''],
type=str,
required=True,
help=(
'''Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'''
''' sigmoied_threshold = Soft movement pruning)'''
),
)
parser.add_argument(
'''--threshold''',
type=float,
required=False,
help=(
'''For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'''
'''For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'''
'''Not needed for `l0`'''
),
)
parser.add_argument(
'''--model_name_or_path''',
type=str,
required=True,
help='''Folder containing the model that was previously fine-pruned''',
)
parser.add_argument(
'''--target_model_path''',
default=None,
type=str,
required=False,
help='''Folder containing the model that was previously fine-pruned''',
)
lowerCAmelCase = parser.parse_args()
main(args)
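# For intuition, TopK binarization keeps the weights whose importance scores lie
# in the top fraction of all scores. A minimal stand-in for emmental's
# TopKBinarizer (editor's illustration only; the real module also provides
# straight-through gradients for training), with `threshold` taken here as the
# fraction of weights to keep:
def topk_mask(scores: torch.Tensor, threshold: float) -> torch.Tensor:
    k = max(1, int(threshold * scores.numel()))
    kth_value = torch.topk(scores.flatten(), k).values.min()
    return (scores >= kth_value).to(scores.dtype)

# Example: keep the top 50% of entries -> mask [[0., 1.], [1., 0.]]
print(topk_mask(torch.tensor([[0.1, 0.9], [0.4, 0.2]]), threshold=0.5))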
| 371
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mra'''] = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 304
| 0
|
ks = range(2, 2_0 + 1)
base = [1_0**k for k in range(ks[-1] + 1)]
memo = {}
def next_term(a_i , k , i , n ):
    '''Jump ahead in the digit-sum recurrence using cached jumps; returns (diff, terms_jumped).'''
__lowercase= sum(a_i[j] for j in range(__a , len(__a ) ) )
__lowercase= sum(a_i[j] * base[j] for j in range(min(len(__a ) , __a ) ) )
__lowercase, __lowercase= 0, 0
__lowercase= n - i
__lowercase= memo.get(__a )
if sub_memo is not None:
__lowercase= sub_memo.get(__a )
if jumps is not None and len(__a ) > 0:
# find and make the largest jump without going over
__lowercase= -1
for _k in range(len(__a ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
__lowercase= _k
break
if max_jump >= 0:
__lowercase, __lowercase, __lowercase= jumps[max_jump]
# since the difference between jumps is cached, add c
__lowercase= diff + c
for j in range(min(__a , len(__a ) ) ):
__lowercase, __lowercase= divmod(__a , 1_0 )
if new_c > 0:
add(__a , __a , __a )
else:
__lowercase= []
else:
__lowercase= {c: []}
__lowercase= sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
__lowercase, __lowercase= next_term(__a , k - 1 , i + dn , __a )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
__lowercase, __lowercase= compute(__a , __a , i + dn , __a )
diff += _diff
dn += terms_jumped
__lowercase= sub_memo[c]
# keep jumps sorted by # of terms skipped
__lowercase= 0
while j < len(__a ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(__a , (diff, dn, k) )
return (diff, dn)
def compute(a_i , k , i , n ):
    '''Compute sequential terms directly (same contract as next_term, without jump caching).'''
if i >= n:
return 0, i
if k > len(__a ):
a_i.extend([0 for _ in range(k - len(__a ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
__lowercase= i
__lowercase, __lowercase, __lowercase= 0, 0, 0
for j in range(len(__a ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
__lowercase= ds_c + ds_b
diff += addend
__lowercase= 0
for j in range(__a ):
__lowercase= a_i[j] + addend
__lowercase, __lowercase= divmod(__a , 1_0 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(__a , __a , __a )
return diff, i - start_i
def add(digits , k , addend ) -> None:
    '''Add `addend` into the little-endian digit array `digits`, starting at index k.'''
    for j in range(k , len(digits ) ):
        s = digits[j] + addend
        if s >= 1_0:
            quotient, digits[j] = divmod(s , 1_0 )
            addend = addend // 1_0 + quotient
        else:
            digits[j] = s
            addend = addend // 1_0
        if addend == 0:
            break
    while addend > 0:
        addend, digit = divmod(addend , 1_0 )
        digits.append(digit )
def solution(n = 1_0**1_5 ) -> int:
    '''Return a(n) where a(i+1) = a(i) + digit_sum(a(i)) and a(1) = 1.'''
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits , 2_0 , i + dn , n )
        dn += terms_jumped
        if dn == n - i:
            break
    a_n = 0
    for j in range(len(digits ) ):
        a_n += digits[j] * 1_0**j
    return a_n
if __name__ == "__main__":
print(F'{solution() = }')
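# A direct brute force of the same recurrence (a(i+1) = a(i) + digitsum(a(i))) is
# only viable for small n, but is handy for sanity-checking the jump-based
# solution above (editor's addition):
def solution_bruteforce(n: int) -> int:
    a_n = 1
    for _ in range(n - 1):
        a_n += sum(int(d) for d in str(a_n))
    return a_n

assert solution_bruteforce(1_0) == 6_2  # sequence: 1, 2, 4, 8, 16, 23, 28, 38, 49, 62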
| 350
|
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = '''<<<<<<< This should probably be modified because it mentions: '''
HIGHLIGHT_MESSAGE_POST = '''=======
>>>>>>>
'''
TO_HIGHLIGHT = [
'''TextEncoderConfig''',
'''ByteTextEncoder''',
'''SubwordTextEncoder''',
'''encoder_config''',
'''maybe_build_from_corpus''',
'''manual_dir''',
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(R'''tfds\.core''', R'''datasets'''),
(R'''tf\.io\.gfile\.GFile''', R'''open'''),
(R'''tf\.([\w\d]+)''', R'''datasets.Value(\'\1\')'''),
(R'''tfds\.features\.Text\(\)''', R'''datasets.Value(\'string\')'''),
(R'''tfds\.features\.Text\(''', R'''datasets.Value(\'string\'),'''),
(R'''features\s*=\s*tfds.features.FeaturesDict\(''', R'''features=datasets.Features('''),
(R'''tfds\.features\.FeaturesDict\(''', R'''dict('''),
(R'''The TensorFlow Datasets Authors''', R'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''),
(R'''tfds\.''', R'''datasets.'''),
(R'''dl_manager\.manual_dir''', R'''self.config.data_dir'''),
(R'''self\.builder_config''', R'''self.config'''),
]
def convert_command_factory(args: Namespace ):
    '''Factory used by the CLI to build a ConvertCommand from parsed arguments.'''
    return ConvertCommand(args.tfds_path , args.datasets_directory )
class A ( A_ ):
@staticmethod
    def _A (parser ):
        train_parser = parser.add_parser(
            'convert' , help='Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.' , )
        train_parser.add_argument(
            '--tfds_path' , type=str , required=True , help='Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.' , )
        train_parser.add_argument(
            '--datasets_directory' , type=str , required=True , help='Path to the HuggingFace Datasets folder.' )
        train_parser.set_defaults(func=convert_command_factory )
    def __init__(self , tfds_path , datasets_directory , *args ):
        self._logger = get_logger('datasets-cli/converting' )
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory
def _A (self ):
if os.path.isdir(self._tfds_path ):
__lowercase= os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
__lowercase= os.path.dirname(self._tfds_path )
else:
raise ValueError('--tfds_path is neither a directory nor a file. Please check path.' )
__lowercase= os.path.abspath(self._datasets_directory )
self._logger.info(f'Converting datasets from {abs_tfds_path} to {abs_datasets_path}' )
__lowercase= []
__lowercase= []
__lowercase= {}
if os.path.isdir(self._tfds_path ):
__lowercase= os.listdir(lowerCAmelCase )
else:
__lowercase= [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(f'Looking at file {f_name}' )
__lowercase= os.path.join(lowerCAmelCase , lowerCAmelCase )
__lowercase= os.path.join(lowerCAmelCase , lowerCAmelCase )
if not os.path.isfile(lowerCAmelCase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info('Skipping file' )
continue
with open(lowerCAmelCase , encoding='utf-8' ) as f:
__lowercase= f.readlines()
__lowercase= []
__lowercase= False
__lowercase= False
__lowercase= []
for line in lines:
__lowercase= line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
__lowercase= 'import datasets\n'
elif "import tensorflow" in out_line:
# order is important here
__lowercase= ''
continue
elif "from absl import logging" in out_line:
__lowercase= 'from datasets import logging\n'
elif "getLogger" in out_line:
__lowercase= out_line.replace('getLogger' , 'get_logger' )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
__lowercase= True
__lowercase= list(filter(lambda lowerCAmelCase : e in out_line , lowerCAmelCase ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(lowerCAmelCase ) + '\n' )
out_lines.append(lowerCAmelCase )
out_lines.append(lowerCAmelCase )
continue
else:
for pattern, replacement in TO_CONVERT:
__lowercase= re.sub(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
__lowercase= re.match(r'from\stensorflow_datasets.*import\s([^\.\r\n]+)' , lowerCAmelCase )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(',' ) )
__lowercase= 'from . import ' + match.group(1 )
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(f'Error converting {out_line.strip()}' )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
__lowercase= True
out_lines.append(lowerCAmelCase )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
__lowercase= f_name.replace('.py' , '' )
__lowercase= os.path.join(lowerCAmelCase , lowerCAmelCase )
__lowercase= os.path.join(lowerCAmelCase , lowerCAmelCase )
os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase )
self._logger.info(f'Adding directory {output_dir}' )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(lowerCAmelCase )
if needs_manual_update:
with_manual_update.append(lowerCAmelCase )
with open(lowerCAmelCase , 'w' , encoding='utf-8' ) as f:
f.writelines(lowerCAmelCase )
self._logger.info(f'Converted in {output_file}' )
for utils_file in utils_files:
try:
__lowercase= os.path.basename(lowerCAmelCase )
__lowercase= imports_to_builder_map[f_name.replace('.py' , '' )]
self._logger.info(f'Moving {dest_folder} to {utils_file}' )
shutil.copy(lowerCAmelCase , lowerCAmelCase )
except KeyError:
self._logger.error(f'Cannot find destination folder for {utils_file}. Please copy manually.' )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
f'You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.' )
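# The conversion itself is the ordered regex table TO_CONVERT applied line by
# line. A quick demonstration on one TFDS-style line (editor's example; the
# function name in the sample string is made up):
if __name__ == "__main__":
    sample = "data_dir = dl_manager.manual_dir or tfds.core.get_tfds_path(name)"
    for pattern, replacement in TO_CONVERT:
        sample = re.sub(pattern, replacement, sample)
    print(sample)  # -> data_dir = self.config.data_dir or datasets.get_tfds_path(name)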
| 304
| 0
|
from pathlib import Path
import fire
from tqdm import tqdm
def _lowerCamelCase( lowercase__="ro" , lowercase__="en" , lowercase__="wmt16" , lowercase__=None ) -> Optional[int]:
'''simple docstring'''
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError('run pip install datasets' )
__lowercase= F'{src_lang}-{tgt_lang}'
print(F'Converting {dataset}-{pair}' )
__lowercase= datasets.load_dataset(__lowerCamelCase , __lowerCamelCase )
if save_dir is None:
__lowercase= F'{dataset}-{pair}'
__lowercase= Path(__lowerCamelCase )
save_dir.mkdir(exist_ok=__lowerCamelCase )
for split in ds.keys():
print(F'Splitting {split} with {ds[split].num_rows} records' )
# to save to val.source, val.target like summary datasets
__lowercase= "val" if split == "validation" else split
__lowercase= save_dir.joinpath(F'{fn}.source' )
__lowercase= save_dir.joinpath(F'{fn}.target' )
__lowercase= src_path.open('w+' )
__lowercase= tgt_path.open('w+' )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
__lowercase= x["translation"]
src_fp.write(ex[src_lang] + '\n' )
tgt_fp.write(ex[tgt_lang] + '\n' )
print(F'Saved {dataset} dataset to {save_dir}' )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
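# Usage via python-fire, which maps the function's keyword arguments to CLI flags
# (editor's note; the script filename is illustrative):
#
#   python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir wmt16-ro-en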
| 351
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
lowerCAmelCase = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class A ( A_ ):
UpperCamelCase_ : Optional[int] ='''albert'''
def __init__(self , lowerCAmelCase=3_0_0_0_0 , lowerCAmelCase=1_2_8 , lowerCAmelCase=4_0_9_6 , lowerCAmelCase=1_2 , lowerCAmelCase=1 , lowerCAmelCase=6_4 , lowerCAmelCase=1_6_3_8_4 , lowerCAmelCase=1 , lowerCAmelCase="gelu_new" , lowerCAmelCase=0 , lowerCAmelCase=0 , lowerCAmelCase=5_1_2 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=1E-12 , lowerCAmelCase=0.1 , lowerCAmelCase="absolute" , lowerCAmelCase=0 , lowerCAmelCase=2 , lowerCAmelCase=3 , **lowerCAmelCase , ):
super().__init__(pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , **lowerCAmelCase )
__lowercase= vocab_size
__lowercase= embedding_size
__lowercase= hidden_size
__lowercase= num_hidden_layers
__lowercase= num_hidden_groups
__lowercase= num_attention_heads
__lowercase= inner_group_num
__lowercase= hidden_act
__lowercase= intermediate_size
__lowercase= hidden_dropout_prob
__lowercase= attention_probs_dropout_prob
__lowercase= max_position_embeddings
__lowercase= type_vocab_size
__lowercase= initializer_range
__lowercase= layer_norm_eps
__lowercase= classifier_dropout_prob
__lowercase= position_embedding_type
class A ( A_ ):
@property
def _A (self ):
if self.task == "multiple-choice":
__lowercase= {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__lowercase= {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
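# The mapping returned by `inputs` tells the ONNX exporter which input dimensions
# are dynamic at runtime (batch and sequence length, plus the choice axis for
# multiple-choice heads). A sketch of how such a mapping feeds torch.onnx.export
# (editor's illustration; names and the sample-input tuple are placeholders, and
# real exports go through the transformers.onnx tooling):
#
#   dynamic_axes = {name: {0: "batch", 1: "sequence"}
#                   for name in ["input_ids", "attention_mask", "token_type_ids"]}
#   torch.onnx.export(model, (input_ids, attention_mask, token_type_ids),
#                     "albert.onnx",
#                     input_names=["input_ids", "attention_mask", "token_type_ids"],
#                     dynamic_axes=dynamic_axes)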
| 304
| 0
|
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class A ( unittest.TestCase ):
def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=9_9 , lowerCAmelCase=3_2 , lowerCAmelCase=5 , lowerCAmelCase=4 , lowerCAmelCase=3_7 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_1_2 , lowerCAmelCase=1_6 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=4 , ):
__lowercase= parent
__lowercase= batch_size
__lowercase= seq_length
__lowercase= is_training
__lowercase= use_attention_mask
__lowercase= use_token_type_ids
__lowercase= use_labels
__lowercase= vocab_size
__lowercase= hidden_size
__lowercase= num_hidden_layers
__lowercase= num_attention_heads
__lowercase= intermediate_size
__lowercase= hidden_act
__lowercase= hidden_dropout_prob
__lowercase= attention_probs_dropout_prob
__lowercase= max_position_embeddings
__lowercase= type_vocab_size
__lowercase= type_sequence_label_size
__lowercase= initializer_range
__lowercase= num_choices
def _A (self ):
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase= None
if self.use_attention_mask:
__lowercase= random_attention_mask([self.batch_size, self.seq_length] )
__lowercase= None
if self.use_token_type_ids:
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase= RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a_ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _A (self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def _A (self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class A ( __snake_case , unittest.TestCase ):
UpperCamelCase_ : Optional[int] =True
UpperCamelCase_ : Union[str, Any] =(
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _A (self ):
__lowercase= FlaxRobertaPreLayerNormModelTester(self )
@slow
def _A (self ):
for model_class_name in self.all_model_classes:
__lowercase= model_class_name.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=a_ )
__lowercase= model(np.ones((1, 1) ) )
self.assertIsNotNone(a_ )
@require_flax
class A ( unittest.TestCase ):
@slow
def _A (self ):
__lowercase= FlaxRobertaPreLayerNormForMaskedLM.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=a_ )
__lowercase= np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa )
__lowercase= model(a_ )[0]
__lowercase= [1, 1_1, 5_0_2_6_5]
self.assertEqual(list(output.shape ) , a_ )
# compare the actual values for a slice.
__lowercase= np.array(
[[[40.48_80, 18.01_99, -5.23_67], [-1.88_77, -4.08_85, 10.70_85], [-2.26_13, -5.61_10, 7.26_65]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , a_ , atol=1E-4 ) )
@slow
def _A (self ):
__lowercase= FlaxRobertaPreLayerNormModel.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=a_ )
__lowercase= np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa )
__lowercase= model(a_ )[0]
# compare the actual values for a slice.
__lowercase= np.array(
[[[0.02_08, -0.03_56, 0.02_37], [-0.15_69, -0.04_11, -0.26_26], [0.18_79, 0.01_25, -0.00_89]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , a_ , atol=1E-4 ) )
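# The slice-comparison pattern above is the standard way these integration tests
# pin model behavior: compare a small deterministic sub-tensor against hard-coded
# values within a tolerance, so regressions in weights or numerics surface
# immediately. Factored out for clarity (editor's sketch):
def check_output_slice(output, expected_slice, atol=1e-4) -> bool:
    actual = np.asarray(output)[:, :3, :3]
    return np.allclose(actual, np.asarray(expected_slice), atol=atol)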
| 352
|
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size , device ) -> torch.Tensor:
    '''Download the BLIP demo image and preprocess it into a batched tensor.'''
    img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
    raw_image = Image.open(requests.get(img_url , stream=True ).raw ).convert('RGB' )
    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
            transforms.ToTensor(),
            transforms.Normalize((0.4814_5466, 0.457_8275, 0.4082_1073) , (0.2686_2954, 0.2613_0258, 0.2757_7711) ),
        ] )
    image = transform(raw_image ).unsqueeze(0 ).to(device )
    return image
def rename_key(key ) -> str:
    '''Map an original BLIP state-dict key to its Hugging Face equivalent.'''
    if "visual_encoder" in key:
        key = re.sub('visual_encoder*' , 'vision_model.encoder' , key )
    if "blocks" in key:
        key = re.sub(R'blocks' , 'layers' , key )
    if "attn" in key:
        key = re.sub(R'attn' , 'self_attn' , key )
    if "norm1" in key:
        key = re.sub(R'norm1' , 'layer_norm1' , key )
    if "norm2" in key:
        key = re.sub(R'norm2' , 'layer_norm2' , key )
    if "encoder.norm" in key:
        key = re.sub(R'encoder.norm' , 'post_layernorm' , key )
    if "encoder.patch_embed.proj" in key:
        key = re.sub(R'encoder.patch_embed.proj' , 'embeddings.patch_embedding' , key )
    if "encoder.pos_embed" in key:
        key = re.sub(R'encoder.pos_embed' , 'embeddings.position_embedding' , key )
    if "encoder.cls_token" in key:
        key = re.sub(R'encoder.cls_token' , 'embeddings.class_embedding' , key )
    if "self_attn" in key:
        key = re.sub(R'self_attn.proj' , 'self_attn.projection' , key )
    return key
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path , config_path=None ) -> None:
    '''Convert BLIP captioning, VQA, and ITM checkpoints to the Hugging Face format.'''
if config_path is not None:
__lowercase= BlipConfig.from_pretrained(lowercase__ )
else:
__lowercase= BlipConfig(projection_dim=5_1_2 , text_config={} , vision_config={} )
__lowercase= BlipForConditionalGeneration(lowercase__ ).eval()
__lowercase= 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'
__lowercase= blip_decoder(pretrained=lowercase__ , image_size=3_8_4 , vit='base' )
__lowercase= pt_model.eval()
__lowercase= pt_model.state_dict()
for key in modified_state_dict.copy():
__lowercase= modified_state_dict.pop(lowercase__ )
__lowercase= rename_key(lowercase__ )
__lowercase= value
hf_model.load_state_dict(lowercase__ )
__lowercase= 3_8_4
__lowercase= load_demo_image(image_size=lowercase__ , device='cpu' )
__lowercase= BertTokenizer.from_pretrained('bert-base-uncased' )
__lowercase= tokenizer(['a picture of'] ).input_ids
__lowercase= hf_model.generate(lowercase__ , lowercase__ )
assert out[0].tolist() == [3_0_5_2_2, 1_0_3_7, 3_8_6_1, 1_9_9_7, 1_0_3_7, 2_4_5_0, 3_5_6_4, 2_0_0_6, 1_9_9_6, 3_5_0_9, 2_0_0_7, 2_0_1_4, 3_8_9_9, 1_0_2]
__lowercase= hf_model.generate(lowercase__ )
assert out[0].tolist() == [3_0_5_2_2, 1_0_3_7, 2_4_5_0, 3_5_6_4, 2_0_0_6, 1_9_9_6, 3_5_0_9, 2_0_0_7, 2_0_1_4, 3_8_9_9, 1_0_2]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(lowercase__ )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
__lowercase= (
'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'
)
__lowercase= blip_vqa(pretrained=lowercase__ , image_size=lowercase__ , vit='base' )
vqa_model.eval()
__lowercase= vqa_model.state_dict()
for key in modified_state_dict.copy():
__lowercase= modified_state_dict.pop(lowercase__ )
__lowercase= rename_key(lowercase__ )
__lowercase= value
__lowercase= BlipForQuestionAnswering(lowercase__ )
hf_vqa_model.load_state_dict(lowercase__ )
__lowercase= ['How many dogs are in this image?']
__lowercase= tokenizer(lowercase__ , return_tensors='pt' ).input_ids
__lowercase= hf_vqa_model.generate(lowercase__ , lowercase__ )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '_vqa' )
__lowercase= 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'
__lowercase= blip_itm(pretrained=lowercase__ , image_size=lowercase__ , vit='base' )
itm_model.eval()
__lowercase= itm_model.state_dict()
for key in modified_state_dict.copy():
__lowercase= modified_state_dict.pop(lowercase__ )
__lowercase= rename_key(lowercase__ )
__lowercase= value
__lowercase= BlipForImageTextRetrieval(lowercase__ )
__lowercase= ['A picture of a woman with a dog sitting in a beach']
__lowercase= tokenizer(
lowercase__ , return_tensors='pt' , padding='max_length' , truncation=lowercase__ , max_length=3_5 , ).input_ids
hf_itm_model.load_state_dict(lowercase__ )
hf_itm_model.eval()
__lowercase= hf_itm_model(lowercase__ , lowercase__ , use_itm_head=lowercase__ )
__lowercase= hf_itm_model(lowercase__ , lowercase__ , use_itm_head=lowercase__ )
assert out[0].item() == 0.2110_6874_9427_7954
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_5698_8453_8650_5127
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + '_itm' )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
lowerCAmelCase = parser.parse_args()
    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
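# Quick check of the key-renaming logic above (editor's example): a ViT block's
# attention projection maps onto the HF vision tower naming.
assert rename_key('visual_encoder.blocks.0.attn.proj.weight') == (
    'vision_model.encoder.layers.0.self_attn.projection.weight'
)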
| 304
| 0
|
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
lowerCAmelCase = logging.get_logger(__name__)
def normalize_box(box , width , height ) -> list:
    '''Scale a pixel bounding box into the 0-1000 coordinate range.'''
return [
int(1_0_0_0 * (box[0] / width) ),
int(1_0_0_0 * (box[1] / height) ),
int(1_0_0_0 * (box[2] / width) ),
int(1_0_0_0 * (box[3] / height) ),
]
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ = None ) -> Dict:
'''simple docstring'''
__lowercase= tesseract_config if tesseract_config is not None else ''
# apply OCR
__lowercase= to_pil_image(_A )
__lowercase= pil_image.size
__lowercase= pytesseract.image_to_data(_A , lang=_A , output_type='dict' , config=_A )
__lowercase, __lowercase, __lowercase, __lowercase, __lowercase= data['text'], data['left'], data['top'], data['width'], data['height']
# filter empty words and corresponding coordinates
__lowercase= [idx for idx, word in enumerate(_A ) if not word.strip()]
__lowercase= [word for idx, word in enumerate(_A ) if idx not in irrelevant_indices]
__lowercase= [coord for idx, coord in enumerate(_A ) if idx not in irrelevant_indices]
__lowercase= [coord for idx, coord in enumerate(_A ) if idx not in irrelevant_indices]
__lowercase= [coord for idx, coord in enumerate(_A ) if idx not in irrelevant_indices]
__lowercase= [coord for idx, coord in enumerate(_A ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
__lowercase= []
for x, y, w, h in zip(_A , _A , _A , _A ):
__lowercase= [x, y, x + w, y + h]
actual_boxes.append(_A )
# finally, normalize the bounding boxes
__lowercase= []
for box in actual_boxes:
normalized_boxes.append(normalize_box(_A , _A , _A ) )
assert len(_A ) == len(_A ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
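# Output sketch (words and coordinates are illustrative assumptions): for a page
# containing the words "Hello world", the function above returns something like
# (["Hello", "world"], [[50, 50, 200, 150], [210, 50, 400, 150]]),
# where every box is already on the 0-1000 grid produced by normalize_box.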
class A ( SCREAMING_SNAKE_CASE_ ):
UpperCamelCase_ : List[str] =['''pixel_values''']
def __init__(self , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = PILImageResampling.BILINEAR , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = "" , **lowerCAmelCase , ):
super().__init__(**snake_case__ )
__lowercase= size if size is not None else {'height': 2_2_4, 'width': 2_2_4}
__lowercase= get_size_dict(snake_case__ )
__lowercase= do_resize
__lowercase= size
__lowercase= resample
__lowercase= apply_ocr
__lowercase= ocr_lang
__lowercase= tesseract_config
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = PILImageResampling.BILINEAR , lowerCAmelCase = None , **lowerCAmelCase , ):
__lowercase= get_size_dict(snake_case__ )
if "height" not in size or "width" not in size:
raise ValueError(f'The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}' )
__lowercase= (size['height'], size['width'])
return resize(snake_case__ , size=snake_case__ , resample=snake_case__ , data_format=snake_case__ , **snake_case__ )
def _A (self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = ChannelDimension.FIRST , **lowerCAmelCase , ):
__lowercase= do_resize if do_resize is not None else self.do_resize
__lowercase= size if size is not None else self.size
__lowercase= get_size_dict(snake_case__ )
__lowercase= resample if resample is not None else self.resample
__lowercase= apply_ocr if apply_ocr is not None else self.apply_ocr
__lowercase= ocr_lang if ocr_lang is not None else self.ocr_lang
__lowercase= tesseract_config if tesseract_config is not None else self.tesseract_config
__lowercase= make_list_of_images(snake_case__ )
if not valid_images(snake_case__ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
# All transformations expect numpy arrays.
__lowercase= [to_numpy_array(snake_case__ ) for image in images]
if apply_ocr:
requires_backends(self , 'pytesseract' )
__lowercase= []
__lowercase= []
for image in images:
__lowercase= apply_tesseract(snake_case__ , snake_case__ , snake_case__ )
words_batch.append(snake_case__ )
boxes_batch.append(snake_case__ )
if do_resize:
__lowercase= [self.resize(image=snake_case__ , size=snake_case__ , resample=snake_case__ ) for image in images]
# flip color channels from RGB to BGR (as Detectron2 requires this)
__lowercase= [flip_channel_order(snake_case__ ) for image in images]
__lowercase= [to_channel_dimension_format(snake_case__ , snake_case__ ) for image in images]
__lowercase= BatchFeature(data={'pixel_values': images} , tensor_type=snake_case__ )
if apply_ocr:
__lowercase= words_batch
__lowercase= boxes_batch
return data
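# Hedged usage sketch; the public class name (LayoutLMv2ImageProcessor) and the file
# name are assumptions based on the Detectron2/pytesseract hints above, not this file:
# from transformers import LayoutLMv2ImageProcessor
# from PIL import Image
# processor = LayoutLMv2ImageProcessor(apply_ocr=True)
# encoding = processor(Image.open("document.png"), return_tensors="pt")
# encoding then holds "pixel_values" plus "words" and "boxes" when apply_ocr is True.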
| 353
|
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
lowerCAmelCase = (3, 9, -1_1, 0, 7, 5, 1, -1)
lowerCAmelCase = (4, 6, 2, 0, 8, 1_0, 3, -2)
@dataclass
class A :
UpperCamelCase_ : int
UpperCamelCase_ : Node | None
class A :
def __init__(self , lowerCAmelCase ):
__lowercase= None
for i in sorted(lowerCAmelCase , reverse=True ):
__lowercase= Node(lowerCAmelCase , self.head )
def __iter__(self ):
__lowercase= self.head
while node:
yield node.data
__lowercase= node.next_node
def __len__(self ):
return sum(1 for _ in self )
def __str__(self ):
return " -> ".join([str(lowerCAmelCase ) for node in self] )
def _lowerCamelCase( lowercase__ , lowercase__ ) -> SortedLinkedList:
'''simple docstring'''
return SortedLinkedList(list(lowercase__ ) + list(lowercase__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
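# Expected behaviour sketch, assuming the constructor sorts ascending: merging the
# two module-level test tuples above should print
# -11 -> -2 -> -1 -> 0 -> 0 -> 1 -> 2 -> 3 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10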
| 304
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
'''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''',
}
class A ( A_ ):
UpperCamelCase_ : int ='''gpt_bigcode'''
UpperCamelCase_ : List[Any] =['''past_key_values''']
UpperCamelCase_ : Any ={
'''hidden_size''': '''n_embd''',
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__(self , lowerCAmelCase=5_0_2_5_7 , lowerCAmelCase=1_0_2_4 , lowerCAmelCase=7_6_8 , lowerCAmelCase=1_2 , lowerCAmelCase=1_2 , lowerCAmelCase=None , lowerCAmelCase="gelu_pytorch_tanh" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=1E-5 , lowerCAmelCase=0.02 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=5_0_2_5_6 , lowerCAmelCase=5_0_2_5_6 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , **lowerCAmelCase , ):
__lowercase= vocab_size
__lowercase= n_positions
__lowercase= n_embd
__lowercase= n_layer
__lowercase= n_head
__lowercase= n_inner
__lowercase= activation_function
__lowercase= resid_pdrop
__lowercase= embd_pdrop
__lowercase= attn_pdrop
__lowercase= layer_norm_epsilon
__lowercase= initializer_range
__lowercase= scale_attn_weights
__lowercase= use_cache
__lowercase= attention_softmax_in_fpaa
__lowercase= scale_attention_softmax_in_fpaa
__lowercase= multi_query
__lowercase= bos_token_id
__lowercase= eos_token_id
super().__init__(bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase )
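# Hedged instantiation sketch (values are illustrative defaults, not taken from this file):
# config = GPTBigCodeConfig(vocab_size=50257, n_positions=1024, n_embd=768, n_layer=12, n_head=12)
# config.hidden_size == config.n_embd  # via the attribute_map defined above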
| 354
|
from __future__ import annotations
from collections.abc import Callable
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__ = 1_0_0 , ) -> float:
'''simple docstring'''
__lowercase= x_start
__lowercase= fnc(lowercase__ )
__lowercase= 0.0
for _ in range(lowercase__ ):
# Approximates small segments of curve as linear and solve
# for trapezoidal area
__lowercase= (x_end - x_start) / steps + xa
__lowercase= fnc(lowercase__ )
area += abs(fxa + fxa ) * (xa - xa) / 2
# Increment step
__lowercase= xa
__lowercase= fxa
return area
if __name__ == "__main__":
def _lowerCamelCase( lowercase__ ) -> float:
'''simple docstring'''
return x**3 + x**2
print('''f(x) = x^3 + x^2''')
print('''The area between the curve, x = -5, x = 5 and the x axis is:''')
lowerCAmelCase = 1_0
while i <= 1_0_0_0_0_0:
print(F'with {i} steps: {trapezoidal_area(f, -5, 5, i)}')
i *= 1_0
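# Sanity-check sketch: since f(x) = x^3 + x^2 is negative on (-5, -1), the loop above
# approximates the unsigned area, whose exact value is 3752/12 ≈ 312.67, so the printed
# estimates should converge toward that figure as the step count grows.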
| 304
| 0
|
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
lowerCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class A ( _UpperCAmelCase ):
def __init__(self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
super().__init__()
self.register_modules(
vae=SCREAMING_SNAKE_CASE_ , text_encoder=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , safety_checker=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , )
def _A (self , lowerCAmelCase = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__lowercase= self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(SCREAMING_SNAKE_CASE_ )
def _A (self ):
self.enable_attention_slicing(SCREAMING_SNAKE_CASE_ )
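# Hedged usage note: slice_size="auto" above halves the attention head count per
# slice to trade speed for memory; the second method presumably forwards None
# through the same helper to disable slicing again.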
@torch.no_grad()
def __call__(self , lowerCAmelCase , lowerCAmelCase = 5_1_2 , lowerCAmelCase = 5_1_2 , lowerCAmelCase = 5_0 , lowerCAmelCase = 7.5 , lowerCAmelCase = None , lowerCAmelCase = 1 , lowerCAmelCase = 0.0 , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = "pil" , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = 1 , lowerCAmelCase = None , **lowerCAmelCase , ):
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
__lowercase= 1
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
__lowercase= len(SCREAMING_SNAKE_CASE_ )
else:
raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(SCREAMING_SNAKE_CASE_ )}' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or callback_steps <= 0)
):
raise ValueError(
f'`callback_steps` has to be a positive integer but is {callback_steps} of type'
f' {type(SCREAMING_SNAKE_CASE_ )}.' )
# get prompt text embeddings
__lowercase= self.tokenizer(
SCREAMING_SNAKE_CASE_ , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
__lowercase= text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__lowercase= self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
f' {self.tokenizer.model_max_length} tokens: {removed_text}' )
__lowercase= text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
__lowercase= self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__lowercase= text_embeddings.shape
__lowercase= text_embeddings.repeat(1 , SCREAMING_SNAKE_CASE_ , 1 )
__lowercase= text_embeddings.view(bs_embed * num_images_per_prompt , SCREAMING_SNAKE_CASE_ , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__lowercase= guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__lowercase= 42
if negative_prompt is None:
__lowercase= [""""""]
elif type(SCREAMING_SNAKE_CASE_ ) is not type(SCREAMING_SNAKE_CASE_ ):
raise TypeError(
f'`negative_prompt` should be the same type to `prompt`, but got {type(SCREAMING_SNAKE_CASE_ )} !='
f' {type(SCREAMING_SNAKE_CASE_ )}.' )
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
__lowercase= [negative_prompt]
elif batch_size != len(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
f'`negative_prompt`: {negative_prompt} has batch size {len(SCREAMING_SNAKE_CASE_ )}, but `prompt`:'
f' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'
' the batch size of `prompt`.' )
else:
__lowercase= negative_prompt
__lowercase= text_input_ids.shape[-1]
__lowercase= self.tokenizer(
SCREAMING_SNAKE_CASE_ , padding='max_length' , max_length=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , return_tensors='pt' , )
__lowercase= self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__lowercase= uncond_embeddings.shape[1]
__lowercase= uncond_embeddings.repeat(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 1 )
__lowercase= uncond_embeddings.view(batch_size * num_images_per_prompt , SCREAMING_SNAKE_CASE_ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__lowercase= torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__lowercase= (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__lowercase= (batch_size * num_images_per_prompt, self.unet.config.in_channels, 6_4, 6_4)
__lowercase= text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__lowercase= torch.randn(
SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device='cpu' , dtype=SCREAMING_SNAKE_CASE_ ).to(self.device )
__lowercase= torch.randn(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device='cpu' , dtype=SCREAMING_SNAKE_CASE_ ).to(
self.device )
else:
__lowercase= torch.randn(
SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=self.device , dtype=SCREAMING_SNAKE_CASE_ )
__lowercase= torch.randn(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=self.device , dtype=SCREAMING_SNAKE_CASE_ )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
__lowercase= latents_reference.to(self.device )
__lowercase= latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
__lowercase= (latents_shape[3] - latents_shape_reference[3]) // 2
__lowercase= (latents_shape[2] - latents_shape_reference[2]) // 2
__lowercase= latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
__lowercase= latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
__lowercase= 0 if dx < 0 else dx
__lowercase= 0 if dy < 0 else dy
__lowercase= max(-dx , 0 )
__lowercase= max(-dy , 0 )
# import pdb
# pdb.set_trace()
__lowercase= latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__lowercase= self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__lowercase= latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__lowercase= """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__lowercase= {}
if accepts_eta:
__lowercase= eta
for i, t in enumerate(self.progress_bar(SCREAMING_SNAKE_CASE_ ) ):
# expand the latents if we are doing classifier free guidance
__lowercase= torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__lowercase= self.scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# predict the noise residual
__lowercase= self.unet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ ).sample
# perform guidance
if do_classifier_free_guidance:
__lowercase= noise_pred.chunk(2 )
__lowercase= noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
__lowercase= self.scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__lowercase= 1 / 0.1_82_15 * latents
__lowercase= self.vae.decode(SCREAMING_SNAKE_CASE_ ).sample
__lowercase= (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__lowercase= image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
__lowercase= self.feature_extractor(self.numpy_to_pil(SCREAMING_SNAKE_CASE_ ) , return_tensors='pt' ).to(
self.device )
__lowercase= self.safety_checker(
images=SCREAMING_SNAKE_CASE_ , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
__lowercase= None
if output_type == "pil":
__lowercase= self.numpy_to_pil(SCREAMING_SNAKE_CASE_ )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=SCREAMING_SNAKE_CASE_ , nsfw_content_detected=SCREAMING_SNAKE_CASE_ )
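# Standalone sketch of the classifier-free-guidance step used in the denoising loop
# above (tensor shapes are illustrative assumptions):
# import torch
# noise_uncond = torch.randn(1, 4, 64, 64)
# noise_text = torch.randn(1, 4, 64, 64)
# guided = noise_uncond + 7.5 * (noise_text - noise_uncond)
# A guidance scale of 1.0 recovers the conditional prediction; larger scales push the
# sample further toward the text-conditioned direction.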
| 355
|
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class A :
def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=2 , lowerCAmelCase=9_9 , lowerCAmelCase=0 , lowerCAmelCase=3_2 , lowerCAmelCase=5 , lowerCAmelCase=4 , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_1_2 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=2 , lowerCAmelCase=4 , lowerCAmelCase="last" , lowerCAmelCase=True , lowerCAmelCase=None , lowerCAmelCase=0 , ):
__lowercase= parent
__lowercase= batch_size
__lowercase= seq_length
__lowercase= is_training
__lowercase= use_input_lengths
__lowercase= use_token_type_ids
__lowercase= use_labels
__lowercase= gelu_activation
__lowercase= sinusoidal_embeddings
__lowercase= causal
__lowercase= asm
__lowercase= n_langs
__lowercase= vocab_size
__lowercase= n_special
__lowercase= hidden_size
__lowercase= num_hidden_layers
__lowercase= num_attention_heads
__lowercase= hidden_dropout_prob
__lowercase= attention_probs_dropout_prob
__lowercase= max_position_embeddings
__lowercase= type_sequence_label_size
__lowercase= initializer_range
__lowercase= num_labels
__lowercase= num_choices
__lowercase= summary_type
__lowercase= use_proj
__lowercase= scope
__lowercase= bos_token_id
def _A (self ):
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase= random_attention_mask([self.batch_size, self.seq_length] )
__lowercase= None
if self.use_input_lengths:
__lowercase= (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__lowercase= None
if self.use_token_type_ids:
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__lowercase= None
__lowercase= None
__lowercase= None
if self.use_labels:
__lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase= ids_tensor([self.batch_size] , 2 ).float()
__lowercase= ids_tensor([self.batch_size] , self.num_choices )
__lowercase= self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _A (self ):
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , lengths=lowerCAmelCase , langs=lowerCAmelCase )
__lowercase= model(lowerCAmelCase , langs=lowerCAmelCase )
__lowercase= model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMWithLMHeadModel(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMForQuestionAnsweringSimple(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase )
__lowercase= model(lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase )
__lowercase= outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMForQuestionAnswering(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase )
__lowercase= model(
lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase , cls_index=lowerCAmelCase , is_impossible=lowerCAmelCase , p_mask=lowerCAmelCase , )
__lowercase= model(
lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase , cls_index=lowerCAmelCase , is_impossible=lowerCAmelCase , )
((__lowercase), )= result_with_labels.to_tuple()
__lowercase= model(lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase )
((__lowercase), )= result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMForSequenceClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase )
__lowercase= model(lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= self.num_labels
__lowercase= XLMForTokenClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= self.num_choices
__lowercase= XLMForMultipleChoice(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase= token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase= input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase= model(
lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _A (self ):
__lowercase= self.prepare_config_and_inputs()
__lowercase, __lowercase, __lowercase, __lowercase, __lowercase, __lowercase, __lowercase, __lowercase, __lowercase= config_and_inputs
__lowercase= {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class A ( A_ , A_ , A_ , unittest.TestCase ):
UpperCamelCase_ : int =(
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
UpperCamelCase_ : Dict =(
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
UpperCamelCase_ : str =(
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False ):
__lowercase= super()._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
__lowercase= torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase )
__lowercase= torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase )
return inputs_dict
def _A (self ):
__lowercase= XLMModelTester(self )
__lowercase= ConfigTester(self , config_class=lowerCAmelCase , emb_dim=3_7 )
def _A (self ):
self.config_tester.run_common_tests()
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False , lowerCAmelCase=1 ):
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
self.assertListEqual(
[isinstance(lowerCAmelCase , lowerCAmelCase ) for iter_attentions in attentions] , [True] * len(lowerCAmelCase ) )
self.assertEqual(len(lowerCAmelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(lowerCAmelCase ):
# adds PAD dummy token
__lowercase= min_length + idx + 1
__lowercase= min_length + idx + 1
__lowercase= (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(lowerCAmelCase ) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False , lowerCAmelCase=1 ):
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
self.assertListEqual(
[isinstance(lowerCAmelCase , lowerCAmelCase ) for iter_hidden_states in hidden_states] , [True] * len(lowerCAmelCase ) , )
self.assertEqual(len(lowerCAmelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(lowerCAmelCase ):
# adds PAD dummy token
__lowercase= min_length + idx + 1
__lowercase= (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(lowerCAmelCase ) , )
pass
@slow
def _A (self ):
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase= XLMModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
@require_torch
class A ( unittest.TestCase ):
@slow
def _A (self ):
__lowercase= XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' )
model.to(lowerCAmelCase )
__lowercase= torch.tensor([[1_4, 4_4_7]] , dtype=torch.long , device=lowerCAmelCase ) # the president
__lowercase= [
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
__lowercase= model.generate(lowerCAmelCase , do_sample=False )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , lowerCAmelCase )
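# Hedged note: with greedy decoding (do_sample=False) the model argmaxes at every
# step, which is why the expected output above repeats the "the president" bigram;
# sampling (do_sample=True) would instead draw from the softmax distribution.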
| 304
| 0
|