content_type stringclasses 8 values | main_lang stringclasses 7 values | message stringlengths 1 50 | sha stringlengths 40 40 | patch stringlengths 52 962k | file_count int64 1 300 |
|---|---|---|---|---|---|
Python | Python | fix the ordering bugs when using pickle_safe=true | ab6b82c2dbcf5ede7d2950eca1efe815f5c0df75 | <ide><path>keras/engine/training.py
<ide>
<ide> import warnings
<ide> import copy
<del>import time
<ide> import numpy as np
<del>import multiprocessing
<del>import threading
<ide> import six
<ide>
<add>from keras.utils import Sequence
<add>from keras.utils import GeneratorEnqueuer
<add>from keras.utils import OrderedEnqueuer
<add>
<ide> try:
<ide> import queue
<ide> except ImportError:
<ide> def _standardize_weights(y, sample_weight=None, class_weight=None,
<ide> return np.ones((y.shape[0], y.shape[1]), dtype=K.floatx())
<ide>
<ide>
<del>class GeneratorEnqueuer(object):
<del> """Builds a queue out of a data generator.
<del>
<del> Used in `fit_generator`, `evaluate_generator`, `predict_generator`.
<del>
<del> # Arguments
<del> generator: a generator function which endlessly yields data
<del> pickle_safe: use multiprocessing if True, otherwise threading
<del> """
<del>
<del> def __init__(self, generator, pickle_safe=False):
<del> self._generator = generator
<del> self._pickle_safe = pickle_safe
<del> self._threads = []
<del> self._stop_event = None
<del> self.queue = None
<del>
<del> def start(self, workers=1, max_q_size=10, wait_time=0.05):
<del> """Kicks off threads which add data from the generator into the queue.
<del>
<del> # Arguments
<del> workers: number of worker threads
<del> max_q_size: queue size (when full, threads could block on put())
<del> wait_time: time to sleep in-between calls to put()
<del> """
<del>
<del> def data_generator_task():
<del> while not self._stop_event.is_set():
<del> try:
<del> if self._pickle_safe or self.queue.qsize() < max_q_size:
<del> generator_output = next(self._generator)
<del> self.queue.put(generator_output)
<del> else:
<del> time.sleep(wait_time)
<del> except Exception:
<del> self._stop_event.set()
<del> raise
<del>
<del> try:
<del> if self._pickle_safe:
<del> self.queue = multiprocessing.Queue(maxsize=max_q_size)
<del> self._stop_event = multiprocessing.Event()
<del> if hasattr(data_generator_task, 'lock'):
<del> # We should replace the threading lock of the iterator
<del> # with a process-safe lock.
<del> data_generator_task.lock = multiprocessing.Lock()
<del> else:
<del> self.queue = queue.Queue()
<del> self._stop_event = threading.Event()
<del>
<del> for _ in range(workers):
<del> if self._pickle_safe:
<del> # Reset random seed else all children processes
<del> # share the same seed
<del> np.random.seed()
<del> thread = multiprocessing.Process(target=data_generator_task)
<del> thread.daemon = True
<del> else:
<del> thread = threading.Thread(target=data_generator_task)
<del> self._threads.append(thread)
<del> thread.start()
<del> except:
<del> self.stop()
<del> raise
<del>
<del> def is_running(self):
<del> return self._stop_event is not None and not self._stop_event.is_set()
<del>
<del> def stop(self, timeout=None):
<del> """Stop running threads and wait for them to exit, if necessary.
<del>
<del> Should be called by the same thread which called start().
<del>
<del> # Arguments
<del> timeout: maximum time to wait on thread.join()
<del> """
<del> if self.is_running():
<del> self._stop_event.set()
<del>
<del> for thread in self._threads:
<del> if thread.is_alive():
<del> if self._pickle_safe:
<del> thread.terminate()
<del> else:
<del> thread.join(timeout)
<del>
<del> if self._pickle_safe:
<del> if self.queue is not None:
<del> self.queue.close()
<del>
<del> self._threads = []
<del> self._stop_event = None
<del> self.queue = None
<del>
<del>
<ide> class Model(Container):
<ide> """The `Model` class adds training & evaluation routines to a `Container`.
<ide> """
<ide> def fit_generator(self, generator,
<ide> validation_data=None,
<ide> validation_steps=None,
<ide> class_weight=None,
<del> max_q_size=10,
<add> max_queue_size=10,
<ide> workers=1,
<del> pickle_safe=False,
<add> use_multiprocessing=False,
<ide> initial_epoch=0):
<ide> """Fits the model on data yielded batch-by-batch by a Python generator.
<ide>
<ide> The generator is run in parallel to the model, for efficiency.
<ide> For instance, this allows you to do real-time data augmentation
<ide> on images on CPU in parallel to training your model on GPU.
<ide>
<add> The use of `keras.utils.Sequence` guarantees the ordering
<add> and guarantees the single use of every input per epoch when
<add> using `use_multiprocessing=True`.
<add>
<ide> # Arguments
<del> generator: a generator.
<add> generator: a generator or an instance of Sequence (keras.utils.Sequence)
<add> object in order to avoid duplicate data
<add> when using multiprocessing.
<ide> The output of the generator must be either
<ide> - a tuple (inputs, targets)
<ide> - a tuple (inputs, targets, sample_weights).
<ide> def fit_generator(self, generator,
<ide> to yield from `generator` before stopping.
<ide> class_weight: dictionary mapping class indices to a weight
<ide> for the class.
<del> max_q_size: maximum size for the generator queue
<add> max_queue_size: maximum size for the generator queue
<ide> workers: maximum number of processes to spin up
<ide> when using process based threading
<del> pickle_safe: if True, use process based threading.
<add> use_multiprocessing: if True, use process based threading.
<ide> Note that because
<ide> this implementation relies on multiprocessing,
<ide> you should not pass
<ide> def generate_arrays_from_file(path):
<ide> # python 2 has 'next', 3 has '__next__'
<ide> # avoid any explicit version checks
<ide> val_gen = (hasattr(validation_data, 'next') or
<del> hasattr(validation_data, '__next__'))
<add> hasattr(validation_data, '__next__') or
<add> isinstance(validation_data, Sequence))
<ide> if val_gen and not validation_steps:
<ide> raise ValueError('When using a generator for validation data, '
<ide> 'you must specify a value for '
<ide> def generate_arrays_from_file(path):
<ide> val_data += [0.]
<ide> for cbk in callbacks:
<ide> cbk.validation_data = val_data
<add> is_sequence = isinstance(generator, Sequence)
<add> if not is_sequence and use_multiprocessing:
<add> warnings.warn(
<add> UserWarning('Using a generator with `use_multiprocessing=True` may duplicate your data.',
<add> 'Please consider using the `keras.utils.Sequence` object.'))
<ide> enqueuer = None
<ide>
<ide> try:
<del> enqueuer = GeneratorEnqueuer(generator, pickle_safe=pickle_safe)
<del> enqueuer.start(max_q_size=max_q_size, workers=workers)
<add> if is_sequence:
<add> enqueuer = OrderedEnqueuer(generator, use_multiprocessing=use_multiprocessing)
<add> else:
<add> enqueuer = GeneratorEnqueuer(generator, use_multiprocessing=use_multiprocessing,
<add> wait_time=wait_time)
<add> enqueuer.start(workers=workers, max_queue_size=max_queue_size)
<add> output_generator = enqueuer.get()
<ide>
<ide> callback_model.stop_training = False
<ide> while epoch < epochs:
<ide> callbacks.on_epoch_begin(epoch)
<ide> steps_done = 0
<ide> batch_index = 0
<ide> while steps_done < steps_per_epoch:
<del> generator_output = None
<del> while enqueuer.is_running():
<del> if not enqueuer.queue.empty():
<del> generator_output = enqueuer.queue.get()
<del> break
<del> else:
<del> time.sleep(wait_time)
<add> generator_output = next(output_generator)
<ide>
<ide> if not hasattr(generator_output, '__len__'):
<ide> raise ValueError('output of generator should be '
<ide> def generate_arrays_from_file(path):
<ide> val_outs = self.evaluate_generator(
<ide> validation_data,
<ide> validation_steps,
<del> max_q_size=max_q_size,
<add> max_queue_size=max_queue_size,
<ide> workers=workers,
<del> pickle_safe=pickle_safe)
<add> use_multiprocessing=use_multiprocessing)
<ide> else:
<ide> # No need for try/except because
<ide> # data has already been validated.
<ide> def generate_arrays_from_file(path):
<ide>
<ide> @interfaces.legacy_generator_methods_support
<ide> def evaluate_generator(self, generator, steps,
<del> max_q_size=10, workers=1, pickle_safe=False):
<add> max_queue_size=10, workers=1, use_multiprocessing=False):
<ide> """Evaluates the model on a data generator.
<ide>
<ide> The generator should return the same kind of data
<ide> def evaluate_generator(self, generator, steps,
<ide> # Arguments
<ide> generator: Generator yielding tuples (inputs, targets)
<ide> or (inputs, targets, sample_weights)
<add> or an instance of Sequence (keras.utils.Sequence)
<add> object in order to avoid duplicate data
<add> when using multiprocessing.
<ide> steps: Total number of steps (batches of samples)
<ide> to yield from `generator` before stopping.
<del> max_q_size: maximum size for the generator queue
<add> max_queue_size: maximum size for the generator queue
<ide> workers: maximum number of processes to spin up
<ide> when using process based threading
<del> pickle_safe: if True, use process based threading.
<add> use_multiprocessing: if True, use process based threading.
<ide> Note that because
<ide> this implementation relies on multiprocessing,
<ide> you should not pass
<ide> def evaluate_generator(self, generator, steps,
<ide> wait_time = 0.01
<ide> all_outs = []
<ide> batch_sizes = []
<add> is_sequence = isinstance(generator, Sequence)
<add> if not is_sequence and use_multiprocessing:
<add> warnings.warn(
<add> UserWarning('Using a generator with `use_multiprocessing=True` may duplicate your data.',
<add> 'Please consider using the `keras.utils.Sequence` object.'))
<ide> enqueuer = None
<ide>
<ide> try:
<del> enqueuer = GeneratorEnqueuer(generator, pickle_safe=pickle_safe)
<del> enqueuer.start(workers=workers, max_q_size=max_q_size)
<add> if is_sequence:
<add> enqueuer = OrderedEnqueuer(generator, use_multiprocessing=use_multiprocessing)
<add> else:
<add> enqueuer = GeneratorEnqueuer(generator, use_multiprocessing=use_multiprocessing, wait_time=wait_time)
<add> enqueuer.start(workers=workers, max_queue_size=max_queue_size)
<add> output_generator = enqueuer.get()
<ide>
<ide> while steps_done < steps:
<del> generator_output = None
<del> while enqueuer.is_running():
<del> if not enqueuer.queue.empty():
<del> generator_output = enqueuer.queue.get()
<del> break
<del> else:
<del> time.sleep(wait_time)
<del>
<add> generator_output = next(output_generator)
<ide> if not hasattr(generator_output, '__len__'):
<ide> raise ValueError('output of generator should be a tuple '
<ide> '(x, y, sample_weight) '
<ide> def evaluate_generator(self, generator, steps,
<ide>
<ide> @interfaces.legacy_generator_methods_support
<ide> def predict_generator(self, generator, steps,
<del> max_q_size=10, workers=1,
<del> pickle_safe=False, verbose=0):
<add> max_queue_size=10, workers=1,
<add> use_multiprocessing=False, verbose=0):
<ide> """Generates predictions for the input samples from a data generator.
<ide>
<ide> The generator should return the same kind of data as accepted by
<ide> `predict_on_batch`.
<ide>
<ide> # Arguments
<del> generator: Generator yielding batches of input samples.
<add> generator: Generator yielding batches of input samples
<add> or an instance of Sequence (keras.utils.Sequence)
<add> object in order to avoid duplicate data
<add> when using multiprocessing.
<ide> steps: Total number of steps (batches of samples)
<ide> to yield from `generator` before stopping.
<del> max_q_size: Maximum size for the generator queue.
<add> max_queue_size: Maximum size for the generator queue.
<ide> workers: Maximum number of processes to spin up
<ide> when using process based threading
<del> pickle_safe: If `True`, use process based threading.
<add> use_multiprocessing: If `True`, use process based threading.
<ide> Note that because
<ide> this implementation relies on multiprocessing,
<ide> you should not pass
<ide> def predict_generator(self, generator, steps,
<ide> steps_done = 0
<ide> wait_time = 0.01
<ide> all_outs = []
<add> is_sequence = isinstance(generator, Sequence)
<add> if not is_sequence and use_multiprocessing:
<add> warnings.warn(
<add> UserWarning('Using a generator with `use_multiprocessing=True` may duplicate your data.',
<add> 'Please consider using the `keras.utils.Sequence` object.'))
<ide> enqueuer = None
<ide>
<ide> try:
<del> enqueuer = GeneratorEnqueuer(generator, pickle_safe=pickle_safe)
<del> enqueuer.start(workers=workers, max_q_size=max_q_size)
<add> if is_sequence:
<add> enqueuer = OrderedEnqueuer(generator, use_multiprocessing=use_multiprocessing)
<add> else:
<add> enqueuer = GeneratorEnqueuer(generator, use_multiprocessing=use_multiprocessing,
<add> wait_time=wait_time)
<add> enqueuer.start(workers=workers, max_queue_size=max_queue_size)
<add> output_generator = enqueuer.get()
<ide>
<ide> if verbose == 1:
<ide> progbar = Progbar(target=steps)
<ide>
<ide> while steps_done < steps:
<del> generator_output = None
<del> while enqueuer.is_running():
<del> if not enqueuer.queue.empty():
<del> generator_output = enqueuer.queue.get()
<del> break
<del> else:
<del> time.sleep(wait_time)
<del>
<add> generator_output = next(output_generator)
<ide> if isinstance(generator_output, tuple):
<ide> # Compatibility with the generators
<ide> # used for training.
<ide><path>keras/legacy/interfaces.py
<ide> def generator_methods_args_preprocessor(args, kwargs):
<ide> ('val_samples', 'steps'),
<ide> ('nb_epoch', 'epochs'),
<ide> ('nb_val_samples', 'validation_steps'),
<del> ('nb_worker', 'workers')],
<add> ('nb_worker', 'workers'),
<add> ('pickle_safe', 'use_multiprocessing'),
<add> ('max_q_size', 'max_queue_size')],
<ide> preprocessor=generator_methods_args_preprocessor)
<ide>
<ide>
<ide><path>keras/models.py
<ide> def fit_generator(self, generator,
<ide> validation_data=None,
<ide> validation_steps=None,
<ide> class_weight=None,
<del> max_q_size=10,
<add> max_queue_size=10,
<ide> workers=1,
<del> pickle_safe=False,
<add> use_multiprocessing=False,
<ide> initial_epoch=0):
<ide> """Fits the model on data generated batch-by-batch by a Python generator.
<ide>
<ide> def fit_generator(self, generator,
<ide> validation dataset divided by the batch size.
<ide> class_weight: Dictionary mapping class indices to a weight
<ide> for the class.
<del> max_q_size: Maximum size for the generator queue
<add> max_queue_size: Maximum size for the generator queue
<ide> workers: Maximum number of processes to spin up
<del> pickle_safe: Ff True, use process based threading.
<add> use_multiprocessing: Ff True, use process based threading.
<ide> Note that because
<ide> this implementation relies on multiprocessing,
<ide> you should not pass
<ide> def generate_arrays_from_file(path):
<ide> validation_data=validation_data,
<ide> validation_steps=validation_steps,
<ide> class_weight=class_weight,
<del> max_q_size=max_q_size,
<add> max_queue_size=max_queue_size,
<ide> workers=workers,
<del> pickle_safe=pickle_safe,
<add> use_multiprocessing=use_multiprocessing,
<ide> initial_epoch=initial_epoch)
<ide>
<ide> @interfaces.legacy_generator_methods_support
<ide> def evaluate_generator(self, generator, steps,
<del> max_q_size=10, workers=1,
<del> pickle_safe=False):
<add> max_queue_size=10, workers=1,
<add> use_multiprocessing=False):
<ide> """Evaluates the model on a data generator.
<ide>
<ide> The generator should return the same kind of data
<ide> def evaluate_generator(self, generator, steps,
<ide> or (inputs, targets, sample_weights)
<ide> steps: Total number of steps (batches of samples)
<ide> to yield from `generator` before stopping.
<del> max_q_size: maximum size for the generator queue
<add> max_queue_size: maximum size for the generator queue
<ide> workers: maximum number of processes to spin up
<del> pickle_safe: if True, use process based threading.
<add> use_multiprocessing: if True, use process based threading.
<ide> Note that because this implementation
<ide> relies on multiprocessing, you should not pass
<ide> non picklable arguments to the generator
<ide> def evaluate_generator(self, generator, steps,
<ide> 'before being used.')
<ide> return self.model.evaluate_generator(generator,
<ide> steps,
<del> max_q_size=max_q_size,
<add> max_queue_size=max_queue_size,
<ide> workers=workers,
<del> pickle_safe=pickle_safe)
<add> use_multiprocessing=use_multiprocessing)
<ide>
<ide> @interfaces.legacy_generator_methods_support
<ide> def predict_generator(self, generator, steps,
<del> max_q_size=10, workers=1,
<del> pickle_safe=False, verbose=0):
<add> max_queue_size=10, workers=1,
<add> use_multiprocessing=False, verbose=0):
<ide> """Generates predictions for the input samples from a data generator.
<ide>
<ide> The generator should return the same kind of data as accepted by
<ide> def predict_generator(self, generator, steps,
<ide> generator: generator yielding batches of input samples.
<ide> steps: Total number of steps (batches of samples)
<ide> to yield from `generator` before stopping.
<del> max_q_size: maximum size for the generator queue
<add> max_queue_size: maximum size for the generator queue
<ide> workers: maximum number of processes to spin up
<del> pickle_safe: if True, use process based threading.
<add> use_multiprocessing: if True, use process based threading.
<ide> Note that because this implementation
<ide> relies on multiprocessing, you should not pass
<ide> non picklable arguments to the generator
<ide> def predict_generator(self, generator, steps,
<ide> if self.model is None:
<ide> self.build()
<ide> return self.model.predict_generator(generator, steps,
<del> max_q_size=max_q_size,
<add> max_queue_size=max_queue_size,
<ide> workers=workers,
<del> pickle_safe=pickle_safe,
<add> use_multiprocessing=use_multiprocessing,
<ide> verbose=verbose)
<ide>
<ide> def get_config(self):
<ide><path>keras/utils/__init__.py
<ide> # Globally-importable utils.
<ide> from .io_utils import HDF5Matrix
<ide> from .data_utils import get_file
<add>from .data_utils import Sequence
<add>from .data_utils import GeneratorEnqueuer
<add>from .data_utils import OrderedEnqueuer
<ide> from .generic_utils import CustomObjectScope
<ide> from .generic_utils import custom_object_scope
<ide> from .generic_utils import get_custom_objects
<ide><path>keras/utils/data_utils.py
<ide> from __future__ import absolute_import
<ide> from __future__ import print_function
<ide>
<del>import tarfile
<del>import zipfile
<add>import hashlib
<add>import multiprocessing
<ide> import os
<del>import sys
<add>import random
<ide> import shutil
<del>import hashlib
<add>import sys
<add>import tarfile
<add>import threading
<add>import time
<add>import zipfile
<add>from abc import abstractmethod
<add>from multiprocessing.pool import ThreadPool
<add>
<add>import numpy as np
<ide> import six
<del>from six.moves.urllib.request import urlopen
<del>from six.moves.urllib.error import URLError
<ide> from six.moves.urllib.error import HTTPError
<add>from six.moves.urllib.error import URLError
<add>from six.moves.urllib.request import urlopen
<ide>
<del>from ..utils.generic_utils import Progbar
<add>try:
<add> import queue
<add>except ImportError:
<add> import Queue as queue
<ide>
<add>from ..utils.generic_utils import Progbar
<ide>
<ide> if sys.version_info[0] == 2:
<ide> def urlretrieve(url, filename, reporthook=None, data=None):
<ide> def urlretrieve(url, filename, reporthook=None, data=None):
<ide> a block size in bytes, and the total size of the file.
<ide> data: `data` argument passed to `urlopen`.
<ide> """
<add>
<ide> def chunk_read(response, chunk_size=8192, reporthook=None):
<ide> content_type = response.info().get('Content-Length')
<ide> total_size = -1
<ide> def validate_file(fpath, file_hash, algorithm='auto', chunk_size=65535):
<ide> return True
<ide> else:
<ide> return False
<add>
<add>
<add>class Sequence(object):
<add> """Base object for fitting to a sequence of data, such as a dataset.
<add> Every `Sequence` must implements the `__getitem__` and the `__len__` methods.
<add>
<add> # Examples
<add>
<add> ```python
<add> from skimage.io import imread
<add> from skimage.transform import resize
<add> import numpy as np
<add>
<add> # Here, `x_set` is list of path to the images
<add> # and `y_set` are the associated classes.
<add>
<add> class CIFAR10Sequence(Sequence):
<add> def __init__(self, x_set, y_set, batch_size):
<add> self.X,self.y = x_set,y_set
<add> self.batch_size = batch_size
<add>
<add> def __len__(self):
<add> return len(self.X) // self.batch_size
<add>
<add> def __getitem__(self,idx):
<add> batch_x = self.X[idx*self.batch_size:(idx+1)*self.batch_size]
<add> batch_y = self.y[idx*self.batch_size:(idx+1)*self.batch_size]
<add>
<add> return np.array([
<add> resize(imread(file_name), (200,200))
<add> for file_name in batch_x]), np.array(batch_y)
<add> ```
<add> """
<add>
<add> @abstractmethod
<add> def __getitem__(self, index):
<add> """Gets batch at position `index`.
<add>
<add> # Arguments
<add> index: position of the batch in the Sequence.
<add>
<add> # Returns
<add> A batch
<add> """
<add> raise NotImplementedError
<add>
<add> @abstractmethod
<add> def __len__(self):
<add> """Number of batch in the Sequence.
<add>
<add> # Returns
<add> The number of batches in the Sequence.
<add> """
<add> raise NotImplementedError
<add>
<add>
<add>def get_index(ds, i):
<add> """Quick fix for Python2, otherwise, it cannot be pickled.
<add>
<add> # Arguments
<add> ds: a Sequence object
<add> i: index
<add>
<add> # Returns
<add> The value at index `i`.
<add> """
<add> return ds[i]
<add>
<add>
<add>class SequenceEnqueuer(object):
<add> """Base class to enqueue inputs.
<add> The task of an Enqueuer is to use parallelism to speed up the preprocessing.
<add> This is done with processes or threads.
<add>
<add> # Examples
<add>
<add> ```python
<add> enqueuer = SequenceEnqueuer(...)
<add> enqueuer.start()
<add> datas = enqueuer.get()
<add> for data in datas:
<add> # Use the inputs; training, evaluating, predicting.
<add> # ... stop sometime.
<add> enqueuer.close()
<add> ```
<add>
<add> The `enqueuer.get()` should be an infinite stream of datas.
<add>
<add> """
<add>
<add> @abstractmethod
<add> def is_running(self):
<add> raise NotImplemented
<add>
<add> @abstractmethod
<add> def start(self, workers=1, max_queue_size=10):
<add> """Starts the handler's workers.
<add>
<add> # Arguments
<add> workers: number of worker threads
<add> max_queue_size: queue size (when full, threads could block on `put()`)
<add> """
<add> raise NotImplemented
<add>
<add> @abstractmethod
<add> def stop(self, timeout=None):
<add> """Stop running threads and wait for them to exit, if necessary.
<add>
<add> Should be called by the same thread which called start().
<add>
<add> # Arguments
<add> timeout: maximum time to wait on thread.join()
<add> """
<add> raise NotImplemented
<add>
<add> @abstractmethod
<add> def get(self):
<add> """Creates a generator to extract data from the queue. Skip the data if it's None.
<add>
<add> # Returns
<add> Generator yielding tuples (inputs, targets)
<add> or (inputs, targets, sample_weights)
<add> """
<add> raise NotImplemented
<add>
<add>
<add>class OrderedEnqueuer(SequenceEnqueuer):
<add> """Builds a Enqueuer from a Sequence.
<add>
<add> Used in `fit_generator`, `evaluate_generator`, `predict_generator`.
<add>
<add> # Arguments
<add> sequence: A `keras.utils.data_utils.Sequence` object.
<add> use_multiprocessing: use multiprocessing if True, otherwise threading
<add> scheduling: Sequential querying of datas if 'sequential', random otherwise.
<add> """
<add>
<add> def __init__(self, sequence, use_multiprocessing=False, scheduling='sequential'):
<add> self.sequence = sequence
<add> self.use_multiprocessing = use_multiprocessing
<add> self.scheduling = scheduling
<add> self.workers = 0
<add> self.executor = None
<add> self.queue = None
<add> self.run_thread = None
<add> self.stop_signal = None
<add>
<add> def is_running(self):
<add> return self.stop_signal is not None and not self.stop_signal.is_set()
<add>
<add> def start(self, workers=1, max_queue_size=10):
<add> """Start the handler's workers.
<add>
<add> # Arguments
<add> workers: number of worker threads
<add> max_queue_size: queue size (when full, workers could block on put())
<add> """
<add> if self.use_multiprocessing:
<add> self.executor = multiprocessing.Pool(workers)
<add> else:
<add> self.executor = ThreadPool(workers)
<add> self.queue = queue.Queue(max_queue_size)
<add> self.stop_signal = threading.Event()
<add> self.run_thread = threading.Thread(target=self._run)
<add> self.run_thread.daemon = True
<add> self.run_thread.start()
<add>
<add> def _run(self):
<add> """Function to submit request to the executor and queue the `Future` objects."""
<add> sequence = list(range(len(self.sequence)))
<add> while True:
<add> if self.scheduling is not 'sequential':
<add> random.shuffle(sequence)
<add> for i in sequence:
<add> if self.stop_signal.is_set():
<add> return
<add> self.queue.put(
<add> self.executor.apply_async(get_index, (self.sequence, i)), block=True)
<add>
<add> def get(self):
<add> """Creates a generator to extract data from the queue. Skip the data if it's None.
<add>
<add> # Returns
<add> Generator yielding tuples (inputs, targets)
<add> or (inputs, targets, sample_weights)
<add> """
<add> try:
<add> while self.is_running():
<add> inputs = self.queue.get(block=True).get()
<add> if inputs is not None:
<add> yield inputs
<add> except Exception as e:
<add> self.stop()
<add> raise StopIteration(e)
<add>
<add> def stop(self, timeout=None):
<add> """Stops running threads and wait for them to exit, if necessary.
<add>
<add> Should be called by the same thread which called start().
<add>
<add> # Arguments
<add> timeout: maximum time to wait on thread.join()
<add> """
<add> self.stop_signal.set()
<add> with self.queue.mutex:
<add> self.queue.queue.clear()
<add> self.queue.unfinished_tasks = 0
<add> self.queue.not_full.notify()
<add> self.executor.close()
<add> self.run_thread.join(timeout)
<add>
<add>
<add>class GeneratorEnqueuer(SequenceEnqueuer):
<add> """Builds a queue out of a data generator.
<add>
<add> Used in `fit_generator`, `evaluate_generator`, `predict_generator`.
<add>
<add> # Arguments
<add> generator: a generator function which endlessly yields data
<add> use_multiprocessing: use multiprocessing if True, otherwise threading
<add> wait_time: time to sleep in-between calls to `put()`
<add> random_seed: Initial seed for workers, will be incremented by one for each workers.
<add> """
<add>
<add> def __init__(self, generator, use_multiprocessing=False, wait_time=0.05, random_seed=None):
<add> self.wait_time = wait_time
<add> self._generator = generator
<add> self._use_multiprocessing = use_multiprocessing
<add> self._threads = []
<add> self._stop_event = None
<add> self.queue = None
<add> self.random_seed = random_seed
<add>
<add> def start(self, workers=1, max_queue_size=10):
<add> """Kicks off threads which add data from the generator into the queue.
<add>
<add> # Arguments
<add> workers: number of worker threads
<add> max_queue_size: queue size (when full, threads could block on put())
<add> """
<add>
<add> def data_generator_task():
<add> while not self._stop_event.is_set():
<add> try:
<add> if self._use_multiprocessing or self.queue.qsize() < max_queue_size:
<add> generator_output = next(self._generator)
<add> self.queue.put(generator_output)
<add> else:
<add> time.sleep(self.wait_time)
<add> except Exception:
<add> self._stop_event.set()
<add> raise
<add>
<add> try:
<add> if self._use_multiprocessing:
<add> self.queue = multiprocessing.Queue(maxsize=max_queue_size)
<add> self._stop_event = multiprocessing.Event()
<add> else:
<add> self.queue = queue.Queue()
<add> self._stop_event = threading.Event()
<add>
<add> for _ in range(workers):
<add> if self._use_multiprocessing:
<add> # Reset random seed else all children processes
<add> # share the same seed
<add> np.random.seed(self.random_seed)
<add> thread = multiprocessing.Process(target=data_generator_task)
<add> thread.daemon = True
<add> if self.random_seed is not None:
<add> self.random_seed += 1
<add> else:
<add> thread = threading.Thread(target=data_generator_task)
<add> self._threads.append(thread)
<add> thread.start()
<add> except:
<add> self.stop()
<add> raise
<add>
<add> def is_running(self):
<add> return self._stop_event is not None and not self._stop_event.is_set()
<add>
<add> def stop(self, timeout=None):
<add> """Stops running threads and wait for them to exit, if necessary.
<add>
<add> Should be called by the same thread which called start().
<add>
<add> # Arguments
<add> timeout: maximum time to wait on thread.join()
<add> """
<add> if self.is_running():
<add> self._stop_event.set()
<add>
<add> for thread in self._threads:
<add> if thread.is_alive():
<add> if self._use_multiprocessing:
<add> thread.terminate()
<add> else:
<add> thread.join(timeout)
<add>
<add> if self._use_multiprocessing:
<add> if self.queue is not None:
<add> self.queue.close()
<add>
<add> self._threads = []
<add> self._stop_event = None
<add> self.queue = None
<add>
<add> def get(self):
<add> """Creates a generator to extract data from the queue. Skip the data if it's None.
<add>
<add> # Returns
<add> A generator
<add> """
<add> while self.is_running():
<add> if not self.queue.empty():
<add> inputs = self.queue.get()
<add> if inputs is not None:
<add> yield inputs
<add> else:
<add> time.sleep(self.wait_time)
<ide><path>tests/keras/engine/test_training.py
<ide> import pytest
<ide> import numpy as np
<ide> from numpy.testing import assert_allclose
<add>import sys
<ide> import scipy.sparse as sparse
<ide>
<ide> from keras.layers import Dense, Dropout
<ide> from keras.engine.topology import Input
<ide> from keras.engine.training import Model, _check_loss_and_target_compatibility
<ide> from keras.models import Sequential
<ide> from keras import backend as K
<add>from keras.utils import Sequence
<ide> from keras.utils.test_utils import keras_test
<ide> from keras.callbacks import LambdaCallback
<ide>
<ide>
<add>class RandomSequence(Sequence):
<add> def __init__(self, batch_size):
<add> self.batch_size = batch_size
<add>
<add> def __len__(self):
<add> return 12
<add>
<add> def __getitem__(self, idx):
<add> return [np.random.random((self.batch_size, 3)), np.random.random((self.batch_size, 3))], [
<add> np.random.random((self.batch_size, 4)),
<add> np.random.random((self.batch_size, 3))]
<add>
<add>
<ide> @keras_test
<ide> def test_model_methods():
<ide> a = Input(shape=(3,), name='input_a')
<ide> def test_model_methods():
<ide> out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
<ide> {'dense_1': output_a_np, 'dropout': output_b_np},
<ide> epochs=1, batch_size=4, validation_split=0.5,
<del> validation_data=({'input_a': input_a_np, 'input_b': input_b_np}, {'dense_1': output_a_np, 'dropout': output_b_np}))
<add> validation_data=(
<add> {'input_a': input_a_np, 'input_b': input_b_np},
<add> {'dense_1': output_a_np, 'dropout': output_b_np}))
<ide>
<ide> # test_on_batch
<ide> out = model.test_on_batch([input_a_np, input_b_np],
<ide> def gen_data(batch_sz):
<ide> while True:
<ide> yield ([np.random.random((batch_sz, 3)), np.random.random((batch_sz, 3))],
<ide> [np.random.random((batch_sz, 4)), np.random.random((batch_sz, 3))])
<add>
<ide> out = model.fit_generator(gen_data(4), steps_per_epoch=3, epochs=5,
<ide> initial_epoch=2, callbacks=[tracker_cb])
<ide> assert trained_epochs == [2, 3, 4]
<ide> def mse(y_true, y_pred):
<ide> out = model.predict([input_a_np, input_b_np], batch_size=4)
<ide>
<ide> # empty batch
<del> with pytest.raises(ValueError):
<add> with pytest.raises(StopIteration):
<ide> def gen_data():
<ide> yield (np.asarray([]), np.asarray([]))
<ide> out = model.evaluate_generator(gen_data(), steps=1)
<ide> def gen_data():
<ide> [output_a_np, output_b_np],
<ide> sample_weight=sample_weight)
<ide>
<add> model.compile(optimizer, loss, metrics=[], loss_weights=loss_weights,
<add> sample_weight_mode=None)
<add> trained_epochs = []
<add> out = model.fit_generator(generator=RandomSequence(3), steps_per_epoch=4, epochs=5,
<add> initial_epoch=0, validation_data=RandomSequence(4),
<add> validation_steps=3, callbacks=[tracker_cb])
<add> assert trained_epochs == [0, 1, 2, 3, 4]
<add>
<add>
<add>@pytest.mark.skipif(sys.version_info < (3,), reason='Cannot catch warnings in python 2')
<add>@keras_test
<add>def test_warnings():
<add>    """Plain generators with multiprocessing should trigger a `Sequence` warning; `Sequence` inputs should not."""
<add>    a = Input(shape=(3,), name='input_a')
<add>    b = Input(shape=(3,), name='input_b')
<add>
<add>    a_2 = Dense(4, name='dense_1')(a)
<add>    dp = Dropout(0.5, name='dropout')
<add>    b_2 = dp(b)
<add>
<add>    model = Model([a, b], [a_2, b_2])
<add>
<add>    optimizer = 'rmsprop'
<add>    loss = 'mse'
<add>    loss_weights = [1., 0.5]
<add>    model.compile(optimizer, loss, metrics=[], loss_weights=loss_weights,
<add>                  sample_weight_mode=None)
<add>
<add>    def gen_data(batch_sz):
<add>        while True:
<add>            yield ([np.random.random((batch_sz, 3)), np.random.random((batch_sz, 3))],
<add>                   [np.random.random((batch_sz, 4)), np.random.random((batch_sz, 3))])
<add>
<add>    # Using a raw generator together with worker processes is expected to emit
<add>    # a warning that mentions `Sequence` as the recommended alternative.
<add>    with pytest.warns(Warning) as w:
<add>        out = model.fit_generator(gen_data(4), steps_per_epoch=10, use_multiprocessing=True, workers=2)
<add>    warning_raised = any(['Sequence' in str(w_.message) for w_ in w])
<add>    assert warning_raised, 'No warning raised when using generator with processes.'
<add>
<add>    # A `Sequence` input with the same settings must not produce that warning.
<add>    with pytest.warns(None) as w:
<add>        out = model.fit_generator(RandomSequence(3), steps_per_epoch=4, use_multiprocessing=True, workers=2)
<add>    assert all(['Sequence' not in str(w_.message) for w_ in w]), 'A warning was raised for Sequence.'
<add>
<ide>
<ide> @pytest.mark.skipif(K.backend() != 'tensorflow', reason='sparse operations supported only by TF')
<ide> @keras_test
<ide><path>tests/keras/legacy/interface_test.py
<ide> def pred_generator():
<ide> samples_per_epoch=1,
<ide> validation_data=val_generator(),
<ide> nb_val_samples=1,
<del> nb_worker=1)
<del> model.fit_generator(train_generator(),
<del> 10,
<del> 1,
<del> nb_val_samples=1,
<del> nb_worker=1)
<add> nb_worker=1, pickle_safe=True, max_q_size=3)
<add>
<ide> model.evaluate_generator(generator=train_generator(),
<ide> val_samples=2,
<del> nb_worker=1)
<add> nb_worker=1, pickle_safe=False, max_q_size=3)
<ide> model.predict_generator(generator=pred_generator(),
<ide> val_samples=2,
<del> nb_worker=1)
<add> nb_worker=1, pickle_safe=False, max_q_size=3)
<ide>
<ide>
<ide> def test_spatialdropout1d_legacy_interface():
<ide><path>tests/keras/test_sequential_model.py
<ide> def data_generator(train):
<ide> model.fit_generator(data_generator(True), 5, epochs,
<ide> validation_data=data_generator(False),
<ide> validation_steps=3)
<del> model.fit_generator(data_generator(True), 5, epochs, max_q_size=2)
<add> model.fit_generator(data_generator(True), 5, epochs, max_queue_size=2)
<ide> model.evaluate(x_train, y_train)
<ide>
<ide>
<ide> def data_generator(x, y, batch_size=50):
<ide>
<ide> loss = model.evaluate(x_test, y_test)
<ide>
<del> prediction = model.predict_generator(data_generator(x_test, y_test), 1, max_q_size=2, verbose=1)
<del> gen_loss = model.evaluate_generator(data_generator(x_test, y_test, 50), 1, max_q_size=2)
<add> prediction = model.predict_generator(data_generator(x_test, y_test), 1, max_queue_size=2, verbose=1)
<add> gen_loss = model.evaluate_generator(data_generator(x_test, y_test, 50), 1, max_queue_size=2)
<ide> pred_loss = K.eval(K.mean(losses.get(model.loss)(K.variable(y_test), K.variable(prediction))))
<ide>
<ide> assert(np.isclose(pred_loss, loss))
<ide><path>tests/keras/utils/data_utils_test.py
<ide> """Tests for functions in data_utils.py.
<ide> """
<ide> import os
<del>import pytest
<add>import sys
<ide> import tarfile
<add>import threading
<ide> import zipfile
<del>from six.moves.urllib.request import pathname2url
<add>from itertools import cycle
<add>
<add>import numpy as np
<add>import pytest
<ide> from six.moves.urllib.parse import urljoin
<add>from six.moves.urllib.request import pathname2url
<add>
<add>from keras.utils import Sequence
<add>from keras.utils import GeneratorEnqueuer
<add>from keras.utils import OrderedEnqueuer
<add>from keras.utils.data_utils import _hash_file
<ide> from keras.utils.data_utils import get_file
<ide> from keras.utils.data_utils import validate_file
<del>from keras.utils.data_utils import _hash_file
<add>
<add>if sys.version_info < (3,):
<add> def next(x):
<add> return x.next()
<ide>
<ide>
<ide> @pytest.fixture
<ide> def test_data_utils(in_tmpdir):
<ide> os.remove('test.txt')
<ide> os.remove('test.zip')
<ide>
<add>
<add>"""Enqueuers Tests"""
<add>
<add>
<add>class threadsafe_iter:
<add>    """Takes an iterator/generator and makes it thread-safe by
<add>    serializing call to the `next` method of given iterator/generator.
<add>    """
<add>
<add>    def __init__(self, it):
<add>        self.it = it
<add>        self.lock = threading.Lock()
<add>
<add>    def __iter__(self):
<add>        return self
<add>
<add>    def __next__(self):
<add>        # Python 3 iteration protocol; delegates to the Python 2-style `next` below.
<add>        return self.next()
<add>
<add>    def next(self):
<add>        # The lock ensures only one worker thread advances the wrapped iterator at a time.
<add>        with self.lock:
<add>            return next(self.it)
<add>
<add>
<add>def threadsafe_generator(f):
<add> """A decorator that takes a generator function and makes it thread-safe.
<add> """
<add>
<add> def g(*a, **kw):
<add> return threadsafe_iter(f(*a, **kw))
<add>
<add> return g
<add>
<add>
<add>class TestSequence(Sequence):
<add>    """Dummy `Sequence`: item ``i`` is an array of the given shape filled with ``i``."""
<add>
<add>    def __init__(self, shape):
<add>        self.shape = shape
<add>
<add>    def __getitem__(self, item):
<add>        # Each item is uniquely identifiable by its fill value (its index),
<add>        # which lets the tests check ordering of the enqueuer output.
<add>        return np.ones(self.shape, dtype=np.uint8) * item
<add>
<add>    def __len__(self):
<add>        return 100
<add>
<add>
<add>class FaultSequence(Sequence):
<add>    """`Sequence` whose `__getitem__` always raises, to test worker error propagation."""
<add>
<add>    def __getitem__(self, item):
<add>        raise IndexError(item, 'is not present')
<add>
<add>    def __len__(self):
<add>        return 100
<add>
<add>
<add>@threadsafe_generator
<add>def create_generator_from_sequence_threads(ds):
<add>    """Infinite, thread-safe generator over the items of `ds`."""
<add>    for i in cycle(range(len(ds))):
<add>        yield ds[i]
<add>
<add>
<add>def create_generator_from_sequence_pcs(ds):
<add>    """Infinite generator over `ds`; plain (no lock) variant for the process-based tests."""
<add>    for i in cycle(range(len(ds))):
<add>        yield ds[i]
<add>
<add>
<add>def test_generator_enqueuer_threads():
<add> enqueuer = GeneratorEnqueuer(create_generator_from_sequence_threads(
<add> TestSequence([3, 200, 200, 3])), use_multiprocessing=False)
<add> enqueuer.start(3, 10)
<add> gen_output = enqueuer.get()
<add> acc = []
<add> for i in range(100):
<add> acc.append(int(next(gen_output)[0, 0, 0, 0]))
<add>
<add>    """
<add>    Not comparing the order since it is not guaranteed.
<add>    It may come out ordered, but not reliably: one thread can take the GIL before it was supposed to.
<add>    """
<add>    assert len(set(acc) - set(range(100))) == 0, "Output contains unexpected values"
<add> enqueuer.stop()
<add>
<add>
<add>def test_generator_enqueuer_processes():
<add> enqueuer = GeneratorEnqueuer(create_generator_from_sequence_pcs(
<add> TestSequence([3, 200, 200, 3])), use_multiprocessing=True)
<add> enqueuer.start(3, 10)
<add> gen_output = enqueuer.get()
<add> acc = []
<add> for i in range(100):
<add> acc.append(int(next(gen_output)[0, 0, 0, 0]))
<add>    assert acc != list(range(100)), "Order was kept in GeneratorEnqueuer with processes"
<add> enqueuer.stop()
<add>
<add>
<add>def test_generator_enqueuer_fail_threads():
<add>    """A worker exception should surface as StopIteration on the consumer side (threads)."""
<add>    enqueuer = GeneratorEnqueuer(create_generator_from_sequence_threads(
<add>        FaultSequence()), use_multiprocessing=False)
<add>    enqueuer.start(3, 10)
<add>    gen_output = enqueuer.get()
<add>    with pytest.raises(StopIteration):
<add>        next(gen_output)
<add>
<add>
<add>def test_generator_enqueuer_fail_processes():
<add>    """A worker exception should surface as StopIteration on the consumer side (processes)."""
<add>    enqueuer = GeneratorEnqueuer(create_generator_from_sequence_pcs(
<add>        FaultSequence()), use_multiprocessing=True)
<add>    enqueuer.start(3, 10)
<add>    gen_output = enqueuer.get()
<add>    with pytest.raises(StopIteration):
<add>        next(gen_output)
<add>
<add>
<add>def test_ordered_enqueuer_threads():
<add> enqueuer = OrderedEnqueuer(TestSequence([3, 200, 200, 3]), use_multiprocessing=False)
<add> enqueuer.start(3, 10)
<add> gen_output = enqueuer.get()
<add> acc = []
<add> for i in range(100):
<add> acc.append(next(gen_output)[0, 0, 0, 0])
<add>    assert acc == list(range(100)), "Order was not kept in OrderedEnqueuer with threads"
<add> enqueuer.stop()
<add>
<add>
<add>def test_ordered_enqueuer_processes():
<add> enqueuer = OrderedEnqueuer(TestSequence([3, 200, 200, 3]), use_multiprocessing=True)
<add> enqueuer.start(3, 10)
<add> gen_output = enqueuer.get()
<add> acc = []
<add> for i in range(100):
<add> acc.append(next(gen_output)[0, 0, 0, 0])
<add>    assert acc == list(range(100)), "Order was not kept in OrderedEnqueuer with processes"
<add> enqueuer.stop()
<add>
<add>
<add>def test_ordered_enqueuer_fail_threads():
<add>    """OrderedEnqueuer should convert a worker exception into StopIteration (threads)."""
<add>    enqueuer = OrderedEnqueuer(FaultSequence(), use_multiprocessing=False)
<add>    enqueuer.start(3, 10)
<add>    gen_output = enqueuer.get()
<add>    with pytest.raises(StopIteration):
<add>        next(gen_output)
<add>
<add>
<add>def test_ordered_enqueuer_fail_processes():
<add>    """OrderedEnqueuer should convert a worker exception into StopIteration (processes)."""
<add>    enqueuer = OrderedEnqueuer(FaultSequence(), use_multiprocessing=True)
<add>    enqueuer.start(3, 10)
<add>    gen_output = enqueuer.get()
<add>    with pytest.raises(StopIteration):
<add>        next(gen_output)
<add>
<add>
<ide> if __name__ == '__main__':
<ide> pytest.main([__file__])
<ide><path>tests/test_multiprocessing.py
<ide> def custom_generator(use_weights=False):
<ide> steps_per_epoch=5,
<ide> epochs=1,
<ide> verbose=1,
<del> max_q_size=10,
<add> max_queue_size=10,
<ide> workers=4,
<del> pickle_safe=True)
<add> use_multiprocessing=True)
<ide>
<ide> model.fit_generator(custom_generator(),
<ide> steps_per_epoch=5,
<ide> epochs=1,
<ide> verbose=1,
<del> max_q_size=10,
<del> pickle_safe=False)
<add> max_queue_size=10,
<add> use_multiprocessing=False)
<ide>
<ide> model.fit_generator(custom_generator(True),
<ide> steps_per_epoch=5,
<ide> def custom_generator():
<ide> steps_per_epoch=5,
<ide> epochs=1,
<ide> verbose=1,
<del> max_q_size=10,
<add> max_queue_size=10,
<ide> workers=2,
<del> pickle_safe=True)
<add> use_multiprocessing=True)
<ide>
<ide> model.fit_generator(custom_generator(),
<ide> steps_per_epoch=5,
<ide> epochs=1,
<ide> verbose=1,
<del> max_q_size=10,
<del> pickle_safe=False)
<add> max_queue_size=10,
<add> use_multiprocessing=False)
<ide>
<ide> os.remove('data.npz')
<ide>
<ide> def custom_generator():
<ide> model.compile(loss='mse', optimizer='adadelta')
<ide> model.predict_generator(custom_generator(),
<ide> steps=5,
<del> max_q_size=10,
<add> max_queue_size=10,
<ide> workers=2,
<del> pickle_safe=True)
<add> use_multiprocessing=True)
<ide> model.predict_generator(custom_generator(),
<ide> steps=5,
<del> max_q_size=10,
<del> pickle_safe=False)
<add> max_queue_size=10,
<add> use_multiprocessing=False)
<ide>
<ide>
<ide> @keras_test
<ide> def custom_generator():
<ide>
<ide> model.evaluate_generator(custom_generator(),
<ide> steps=5,
<del> max_q_size=10,
<add> max_queue_size=10,
<ide> workers=2,
<del> pickle_safe=True)
<add> use_multiprocessing=True)
<ide> model.evaluate_generator(custom_generator(),
<ide> steps=5,
<del> max_q_size=10,
<del> pickle_safe=False)
<add> max_queue_size=10,
<add> use_multiprocessing=False)
<ide>
<ide>
<ide> @keras_test
<ide> def custom_generator():
<ide>
<ide> samples = batch_size * (good_batches + 1)
<ide>
<del> with pytest.raises(ValueError):
<add> with pytest.raises(StopIteration):
<ide> model.fit_generator(
<ide> custom_generator(), samples, 1,
<del> workers=4, pickle_safe=True,
<add> workers=4, use_multiprocessing=True,
<ide> )
<ide>
<del> with pytest.raises(ValueError):
<add> with pytest.raises(StopIteration):
<ide> model.fit_generator(
<ide> custom_generator(), samples, 1,
<del> pickle_safe=False,
<add> use_multiprocessing=False,
<ide> )
<ide>
<ide>
<ide> def custom_generator():
<ide> model.add(Dense(1, input_shape=(2, )))
<ide> model.compile(loss='mse', optimizer='adadelta')
<ide>
<del> with pytest.raises(ValueError):
<add> with pytest.raises(StopIteration):
<ide> model.evaluate_generator(
<ide> custom_generator(), good_batches + 1, 1,
<del> workers=4, pickle_safe=True,
<add> workers=4, use_multiprocessing=True,
<ide> )
<ide>
<del> with pytest.raises(ValueError):
<add> with pytest.raises(StopIteration):
<ide> model.evaluate_generator(
<ide> custom_generator(), good_batches + 1, 1,
<del> pickle_safe=False,
<add> use_multiprocessing=False,
<ide> )
<ide>
<ide>
<ide> def custom_generator():
<ide> model.add(Dense(1, input_shape=(2, )))
<ide> model.compile(loss='mse', optimizer='adadelta')
<ide>
<del> with pytest.raises(ValueError):
<add> with pytest.raises(StopIteration):
<ide> model.predict_generator(
<ide> custom_generator(), good_batches + 1, 1,
<del> workers=4, pickle_safe=True,
<add> workers=4, use_multiprocessing=True,
<ide> )
<ide>
<del> with pytest.raises(ValueError):
<add> with pytest.raises(StopIteration):
<ide> model.predict_generator(
<ide> custom_generator(), good_batches + 1, 1,
<del> pickle_safe=False,
<add> use_multiprocessing=False,
<ide> )
<ide>
<ide> | 10 |
Javascript | Javascript | make fs.symlink() with no callback async | c381662cac0e897b4bfdb2c31cd3117c5ecd2130 | <ide><path>lib/fs.js
<ide> fs.readlinkSync = function(path) {
<ide> fs.symlink = function(destination, path, type_, callback) {
<ide> var type = (typeof(type_) == 'string' ? type_ : null);
<ide> var callback_ = arguments[arguments.length - 1];
<del> callback = (typeof(callback_) == 'function' ? callback_ : null);
<add> callback = (typeof(callback_) == 'function' ? callback_ : noop);
<ide>
<ide> if (isWindows && type === 'junction') {
<ide> destination = pathModule._makeLong(destination); | 1 |
Python | Python | comment the seq2seq functions | 4c81960b9bc0f553ddf800df16bb82804e162bcb | <ide><path>transformers/modeling_seq2seq.py
<ide> def __init__(self, encoder, decoder):
<ide>
<ide> @classmethod
<ide> def from_pretrained(cls, encoder_pretrained_model_name_or_path, decoder_pretrained_model_name_or_path, *model_args, **kwargs):
<del> r""" Instantiates one of the base model classes of the library
<del> from a pre-trained model configuration.
<del> The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated)
<del> To train the model, you should first set it back in training mode with `model.train()`
<add> r""" Instantiates an encoder and a decoder from one or two base classes
<add> of the library from pre-trained model checkpoints.
<add>
<add>
<add> The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated)
<add> To train the model, you need to first set it back in training mode with `model.train()`
<ide>
<ide> Params:
<del> pretrained_model_name_or_path: either:
<add> encoder_pretrained_model_name_or_path: information necessary to initiate the encoder. Either:
<add>
<add> - a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.
<add> - a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``.
<add> - a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
<add>
<add> decoder_pretrained_model_name_or_path: information necessary to initiate the decoder. Either:
<ide>
<ide> - a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.
<ide> - a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``.
<ide> def from_pretrained(cls, encoder_pretrained_model_name_or_path, decoder_pretrain
<ide> output_loading_info: (`optional`) boolean:
<ide> Set to ``True`` to also return a dictionnary containing missing keys, unexpected keys and error messages.
<ide>
<del> kwargs: (`optional`) Remaining dictionary of keyword arguments:
<add> kwargs: (`optional`) Remaining dictionary of keyword arguments.
<ide> Can be used to update the configuration object (after it being loaded) and initiate the model. (e.g. ``output_attention=True``). Behave differently depending on whether a `config` is provided or automatically loaded:
<ide>
<ide> - If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done)
<ide> - If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function.
<ide>
<add> You can specify different kwargs for the decoder by prefixing the key with `decoder_` (e.g. ``decoder_output_attention=True``).
<add>
<ide> Examples::
<ide>
<del> model = AutoModel.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache.
<del> model = AutoModel.from_pretrained('./test/bert_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')`
<del> model = AutoModel.from_pretrained('bert-base-uncased', output_attention=True) # Update configuration during loading
<del> assert model.config.output_attention == True
<del> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
<del> config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
<del> model = AutoModel.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
<add> model = PreTrainedSeq2seq.from_pretained('bert-base-uncased', 'bert-base-uncased') # initialize Bert2Bert
<ide> """
<ide>
<ide> # Separate the encoder- and decoder- specific kwargs. A kwarg is
<ide> def from_pretrained(cls, encoder_pretrained_model_name_or_path, decoder_pretrain
<ide> encoder = kwargs.pop('encoder_model', None)
<ide> if encoder is None:
<ide> kwargs_encoder['is_decoder'] = False
<del> encoder = AutoModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs_encoder)
<add> encoder = AutoModel.from_pretrained(encoder_pretrained_model_name_or_path, *model_args, **kwargs_encoder)
<ide>
<ide> decoder = kwargs.pop('decoder_model', None)
<ide> if decoder is None:
<ide> kwargs_decoder['is_decoder'] = True
<del> decoder_model = AutoModelWithLMHead.from_pretrained(decoder_pretrained_model_name_or_path, **kwargs)
<add> decoder = AutoModelWithLMHead.from_pretrained(decoder_pretrained_model_name_or_path, **kwargs_decoder)
<ide>
<ide> model = cls(encoder, decoder)
<ide>
<ide> return model
<ide>
<ide> def forward(self, *inputs, **kwargs):
<del> # Extract decoder inputs
<del> decoder_kwargs = {}
<del> for key in kwargs.keys():
<add> """ The forward pass on a seq2eq depends what we are performing:
<add>
<add> - During training we perform one forward pass through both the encoder
<add> and decoder;
<add> - During prediction, we perform one forward pass through the encoder,
<add> and then perform several forward passes with the encoder's hidden
<add> state through the decoder to decode a full sequence.
<add>
<add> Therefore, we skip the forward pass on the encoder if an argument named
<add> `encoder_hidden_state` is passed to this function.
<add>
<add> """
<add> # Separate the encoder- and decoder- specific kwargs. A kwarg is
<add> # decoder-specific it the key starts with `decoder_`
<add> kwargs_decoder = {}
<add> kwargs_encoder = kwargs
<add> for key in kwargs_encoder.keys():
<ide> if key.startswith('decoder_'):
<del> decoder_kwargs[key.replace('decoder_', '')] = kwargs.pop(key)
<add> kwargs_decoder[key.replace('decoder_', '')] = kwargs_encoder.pop(key)
<ide>
<del> # Compute encoder hidden states if needed
<del> encoder_hidden_states = kwargs.pop('encoder_hidden_states', None)
<add> # Encode if needed (training, first prediction pass)
<add> encoder_hidden_states = kwargs_encoder.pop('encoder_hidden_states', None)
<ide> if encoder_hidden_states is None:
<del> encoder_outputs = self.encoder(*inputs, *kwargs)
<add> encoder_outputs = self.encoder(*inputs, **kwargs_encoder)
<ide> encoder_hidden_states = encoder_outputs[0]
<ide> else:
<del> encoder_outputs = (,)
<add> encoder_outputs = ()
<ide>
<ide> # Decode
<del> decoder_kwargs['encoder_hidden_states'] = encoder_hidden_states
<del> decoder_outputs = self.decoder(**decoder_kwargs)
<add> kwargs_decoder['encoder_hidden_states'] = encoder_hidden_states
<add> decoder_outputs = self.decoder(**kwargs_decoder)
<ide>
<ide> return decoder_outputs + encoder_outputs
<ide>
<ide> def from_pretrained(cls, *args, **kwargs):
<ide> # We will create a randomly initilized LSTM model as decoder
<ide> if 'decoder_config' not in kwargs:
<ide> raise ValueError("To load an LSTM in Seq2seq model, please supply either: "
<del> " - a torch.nn.LSTM model as `decoder_model` parameter (`decoder_model=lstm_model`), or "
<del> " - a dictionary of configuration parameters that will be used to initialize a
<del> " torch.nn.LSTM model as `decoder_config` keyword argument. "
<del> " E.g. `decoder_config=\{'input_size': 768, 'hidden_size': 768, 'num_layers': 2\}`")
<del> kwargs['decoder_model'] = torch.nn.LSTM(kwarg.pop('decoder_config'))
<add> " - a torch.nn.LSTM model as `decoder_model` parameter (`decoder_model=lstm_model`), or"
<add> " - a dictionary of configuration parameters that will be used to initialize a"
<add> " torch.nn.LSTM model as `decoder_config` keyword argument. "
<add> " E.g. `decoder_config={'input_size': 768, 'hidden_size': 768, 'num_layers': 2}`")
<add> kwargs['decoder_model'] = torch.nn.LSTM(kwargs.pop('decoder_config'))
<ide> model = super(Model2LSTM, cls).from_pretrained(*args, **kwargs)
<ide> return model
<del> | 1 |
Javascript | Javascript | fix warning without stack for ie9 | 1b2646a403a23556084189f055d75d68da2a6cd4 | <ide><path>packages/shared/warningWithoutStack.js
<ide> if (__DEV__) {
<ide> 'message argument',
<ide> );
<ide> }
<add> if (args.length > 8) {
<add> // Check before the condition to catch violations early.
<add> throw new Error(
<add> 'warningWithoutStack() currently supports at most 8 arguments.',
<add> );
<add> }
<ide> if (condition) {
<ide> return;
<ide> }
<ide> if (typeof console !== 'undefined') {
<del> const stringArgs = args.map(item => '' + item);
<del> console.error('Warning: ' + format, ...stringArgs);
<add> const [a, b, c, d, e, f, g, h] = args.map(item => '' + item);
<add> const message = 'Warning: ' + format;
<add>
<add> // We intentionally don't use spread (or .apply) because it breaks IE11:
<add> // https://github.com/facebook/react/issues/13610
<add> switch (args.length) {
<add> case 0:
<add> console.error(message);
<add> break;
<add> case 1:
<add> console.error(message, a);
<add> break;
<add> case 2:
<add> console.error(message, a, b);
<add> break;
<add> case 3:
<add> console.error(message, a, b, c);
<add> break;
<add> case 4:
<add> console.error(message, a, b, c, d);
<add> break;
<add> case 5:
<add> console.error(message, a, b, c, d, e);
<add> break;
<add> case 6:
<add> console.error(message, a, b, c, d, e, f);
<add> break;
<add> case 7:
<add> console.error(message, a, b, c, d, e, f, g);
<add> break;
<add> case 8:
<add> console.error(message, a, b, c, d, e, f, g, h);
<add> break;
<add> default:
<add> throw new Error(
<add> 'warningWithoutStack() currently supports at most 8 arguments.',
<add> );
<add> }
<ide> }
<ide> try {
<ide> // --- Welcome to debugging React --- | 1 |
Text | Text | fix an output example in repl.md | 4936c411945f6e6cd369a348a9262b2e214bd299 | <ide><path>doc/api/repl.md
<ide> global or scoped variable, the input `fs` will be evaluated on-demand as
<ide>
<ide> The default evaluator will, by default, assign the result of the most recently
<ide> evaluated expression to the special variable `_` (underscore).
<add>Explicitly setting `_` to a value will disable this behavior.
<ide>
<ide> ```js
<ide> > [ 'a', 'b', 'c' ]
<ide> [ 'a', 'b', 'c' ]
<ide> > _.length
<ide> 3
<ide> > _ += 1
<add>Expression assignment to _ now disabled.
<add>4
<add>> 1 + 1
<add>2
<add>> _
<ide> 4
<ide> ```
<ide>
<del>Explicitly setting `_` to a value will disable this behavior.
<del>
<ide> ### Custom Evaluation Functions
<ide>
<ide> When a new `repl.REPLServer` is created, a custom evaluation function may be | 1 |
Ruby | Ruby | use original system function for gcc check | d9f1be7ef270458b38d9b41d64d0d807dbabd405 | <ide><path>install_homebrew.rb
<ide> def ohai *args
<ide> def warn warning
<ide> puts "#{Tty.red}Warning#{Tty.reset}: #{warning.chomp}"
<ide> end
<del>
<del>alias :system_orig :system
<ide>
<ide> def system *args
<del> abort "Failed during: #{args.shell_s}" unless system_orig *args
<add> abort "Failed during: #{args.shell_s}" unless Kernel.system *args
<ide> end
<ide>
<ide> def sudo *args
<ide> def getc # NOTE only tested on OS X
<ide> ohai "Installation successful!"
<ide>
<ide> warn "/usr/local/bin is not in your PATH." unless ENV['PATH'].split(':').include? '/usr/local/bin'
<del>warn "Now install Xcode." unless system "/usr/bin/which gcc"
<add>warn "Now install Xcode." unless Kernel.system "/usr/bin/which -s gcc" | 1 |
Ruby | Ruby | reduce allocations in camelize inflector | 73bc476cd0e0387b4041ad56f6ac54b9bbc5599c | <ide><path>activesupport/lib/active_support/inflector/methods.rb
<ide> def singularize(word, locale = :en)
<ide> def camelize(term, uppercase_first_letter = true)
<ide> string = term.to_s
<ide> if uppercase_first_letter
<del> string = string.sub(/^[a-z\d]*/) { |match| inflections.acronyms[match] || match.capitalize }
<add> string = string.sub(/^[a-z\d]*/) { |match| inflections.acronyms[match] || match.capitalize! || match }
<ide> else
<del> string = string.sub(inflections.acronyms_camelize_regex) { |match| match.downcase }
<add> string = string.sub(inflections.acronyms_camelize_regex) { |match| match.downcase! || match }
<add> end
<add> string.gsub!(/(?:_|(\/))([a-z\d]*)/i) do
<add> substituted = inflections.acronyms[$2] || $2.capitalize! || $2
<add> $1 ? "::#{substituted}" : substituted
<ide> end
<del> string.gsub!(/(?:_|(\/))([a-z\d]*)/i) { "#{$1}#{inflections.acronyms[$2] || $2.capitalize}" }
<del> string.gsub!("/", "::")
<ide> string
<ide> end
<ide> | 1 |
Javascript | Javascript | run all logbox tests | 59ea81adb95e068f93231b22d0a91a5435be31d6 | <ide><path>test/acceptance/ReactRefreshLogBox.test.js
<ide> test('unterminated JSX', async () => {
<ide> await cleanup()
<ide> })
<ide>
<del>test.only('conversion to class component (1)', async () => {
<add>test('conversion to class component (1)', async () => {
<ide> const [session, cleanup] = await sandbox()
<ide>
<ide> await session.write( | 1 |
Python | Python | remove unused config hooks | 3b23b4d7c08e3e61f7e1ff3b91a6626b2bcfd5e3 | <ide><path>test/pseudo-tty/testcfg.py
<ide> def RunCommand(self, command, env):
<ide> self.context.GetTimeout(self.mode),
<ide> env,
<ide> True)
<del> self.Cleanup()
<ide> return test.TestOutput(self,
<ide> full_command,
<ide> output,
<ide><path>tools/test.py
<ide> def RunCommand(self, command, env):
<ide> self.context.GetTimeout(self.mode),
<ide> env,
<ide> disable_core_files = self.disable_core_files)
<del> self.Cleanup()
<ide> return TestOutput(self,
<ide> full_command,
<ide> output,
<ide> self.context.store_unexpected_output)
<ide>
<del> def BeforeRun(self):
<del> pass
<del>
<del> def AfterRun(self, result):
<del> pass
<del>
<ide> def Run(self):
<del> self.BeforeRun()
<del>
<ide> try:
<ide> result = self.RunCommand(self.GetCommand(), {
<ide> "TEST_THREAD_ID": "%d" % self.thread_id,
<ide> def Run(self):
<ide> from os import O_NONBLOCK
<ide> for fd in 0,1,2: fcntl(fd, F_SETFL, ~O_NONBLOCK & fcntl(fd, F_GETFL))
<ide>
<del> self.AfterRun(result)
<ide> return result
<ide>
<del> def Cleanup(self):
<del> return
<del>
<ide>
<ide> class TestOutput(object):
<ide> | 2 |
Javascript | Javascript | bring stream examples up-to-date | 8b775c032c3738f7a8f04444165ad0936572afed | <ide><path>examples/stream/stack.js
<ide> var n = 4, // number of layers
<ide> data = d3.layout.stack()(stream_layers(n, m, .1)),
<ide> color = d3.interpolateRgb("#aad", "#556");
<ide>
<del>var p = 20,
<del> w = 960,
<del> h = 500 - .5 - p,
<add>var margin = 20,
<add> width = 960,
<add> height = 500 - .5 - margin,
<ide> mx = m,
<ide> my = d3.max(data, function(d) {
<ide> return d3.max(d, function(d) {
<ide> var p = 20,
<ide> return d.y;
<ide> });
<ide> }),
<del> x = function(d) { return d.x * w / mx; },
<del> y0 = function(d) { return h - d.y0 * h / my; },
<del> y1 = function(d) { return h - (d.y + d.y0) * h / my; },
<del> y2 = function(d) { return d.y * h / mz; }; // or `my` to not rescale
<add> x = function(d) { return d.x * width / mx; },
<add> y0 = function(d) { return height - d.y0 * height / my; },
<add> y1 = function(d) { return height - (d.y + d.y0) * height / my; },
<add> y2 = function(d) { return d.y * height / mz; }; // or `my` to not rescale
<ide>
<ide> var vis = d3.select("#chart")
<ide> .append("svg")
<del> .attr("width", w)
<del> .attr("height", h + p);
<add> .attr("width", width)
<add> .attr("height", height + margin);
<ide>
<ide> var layers = vis.selectAll("g.layer")
<ide> .data(data)
<ide> var bars = layers.selectAll("g.bar")
<ide> bars.append("rect")
<ide> .attr("width", x({x: .9}))
<ide> .attr("x", 0)
<del> .attr("y", h)
<add> .attr("y", height)
<ide> .attr("height", 0)
<ide> .transition()
<ide> .delay(function(d, i) { return i * 10; })
<ide> var labels = vis.selectAll("text.label")
<ide> .enter().append("text")
<ide> .attr("class", "label")
<ide> .attr("x", x)
<del> .attr("y", h + 6)
<add> .attr("y", height + 6)
<ide> .attr("dx", x({x: .45}))
<ide> .attr("dy", ".71em")
<ide> .attr("text-anchor", "middle")
<ide> .text(function(d, i) { return i; });
<ide>
<ide> vis.append("line")
<ide> .attr("x1", 0)
<del> .attr("x2", w - x({x: .1}))
<del> .attr("y1", h)
<del> .attr("y2", h);
<add> .attr("x2", width - x({x: .1}))
<add> .attr("y1", height)
<add> .attr("y2", height);
<ide>
<ide> function transitionGroup() {
<ide> var group = d3.selectAll("#chart");
<ide> function transitionGroup() {
<ide> d3.select(this)
<ide> .transition()
<ide> .duration(500)
<del> .attr("y", function(d) { return h - y2(d); })
<add> .attr("y", function(d) { return height - y2(d); })
<ide> .attr("height", y2);
<ide> }
<ide> }
<ide><path>examples/stream/stream.js
<ide> var n = 20, // number of layers
<ide> data1 = d3.layout.stack().offset("wiggle")(stream_layers(n, m)),
<ide> color = d3.interpolateRgb("#aad", "#556");
<ide>
<del>var w = 960,
<del> h = 500,
<add>var width = 960,
<add> height = 500,
<ide> mx = m - 1,
<ide> my = d3.max(data0.concat(data1), function(d) {
<ide> return d3.max(d, function(d) {
<ide> var w = 960,
<ide> });
<ide>
<ide> var area = d3.svg.area()
<del> .x(function(d) { return d.x * w / mx; })
<del> .y0(function(d) { return h - d.y0 * h / my; })
<del> .y1(function(d) { return h - (d.y + d.y0) * h / my; });
<add> .x(function(d) { return d.x * width / mx; })
<add> .y0(function(d) { return height - d.y0 * height / my; })
<add> .y1(function(d) { return height - (d.y + d.y0) * height / my; });
<ide>
<ide> var vis = d3.select("#chart")
<ide> .append("svg")
<del> .attr("width", w)
<del> .attr("height", h);
<add> .attr("width", width)
<add> .attr("height", height);
<ide>
<ide> vis.selectAll("path")
<ide> .data(data0) | 2 |
Ruby | Ruby | remove full_clone from coretap | 6aa1695df1421d26ae8c536ddac00feb762a6a5b | <ide><path>Library/Homebrew/tap.rb
<ide> def self.ensure_installed!
<ide> end
<ide>
<ide> # CoreTap never allows shallow clones (on request from GitHub).
<del> def install(full_clone: true, quiet: false, clone_target: nil, force_auto_update: nil)
<add> def install(quiet: false, clone_target: nil, force_auto_update: nil)
<ide> raise "Shallow clones are not supported for homebrew-core!" unless full_clone
<ide>
<ide> remote = Homebrew::EnvConfig.core_git_remote
<ide> if remote != default_remote
<ide> $stderr.puts "HOMEBREW_CORE_GIT_REMOTE set: using #{remote} for Homebrew/core Git remote URL."
<ide> end
<del> super(full_clone: full_clone, quiet: quiet, clone_target: remote, force_auto_update: force_auto_update)
<add> super(quiet: quiet, clone_target: remote, force_auto_update: force_auto_update)
<ide> end
<ide>
<ide> # @private | 1 |
Ruby | Ruby | allow leopard_64_or_later bottles | dd75dd8a25b6e60b4f87c2220335e5bd37b9cc0f | <ide><path>Library/Homebrew/os/mac/version.rb
<ide> class Version < ::Version
<ide> mountain_lion: "10.8",
<ide> lion: "10.7",
<ide> snow_leopard: "10.6",
<add> leopard_64: "10.5",
<ide> leopard: "10.5",
<ide> tiger: "10.4",
<ide> }.freeze | 1 |
Python | Python | require thinc v6.11 | f2fa8481c4573a55da7142cc133e5b456fa9c5dc | <ide><path>setup.py
<ide> def setup_package():
<ide> 'murmurhash>=0.28,<0.29',
<ide> 'cymem>=1.30,<1.32',
<ide> 'preshed>=1.0.0,<2.0.0',
<del> 'thinc>=6.10.1,<6.11.0',
<add> 'thinc>=6.11.0,<6.12.0',
<ide> 'plac<1.0.0,>=0.9.6',
<ide> 'pathlib',
<ide> 'ujson>=1.35', | 1 |
Text | Text | add aspectratio property to responsive doc | 1ba06a26fddde17285072f3a446679e9388b382f | <ide><path>docs/general/responsive.md
<ide> Chart.js provides a [few options](#configuration-options) to enable responsivene
<ide> | `responsive` | `Boolean` | `true` | Resizes the chart canvas when its container does ([important note...](#important-note)).
<ide> | `responsiveAnimationDuration` | `Number` | `0` | Duration in milliseconds it takes to animate to new size after a resize event.
<ide> | `maintainAspectRatio` | `Boolean` | `true` | Maintain the original canvas aspect ratio `(width / height)` when resizing.
<add>| `aspectRatio` | `Number` | `2` | Canvas aspect ratio (i.e. `width / height`, a value of 1 representing a square canvas). Note that this option is ignored if the height is explicitly defined either as attribute or via the style.
<ide> | `onResize` | `Function` | `null` | Called when a resize occurs. Gets passed two arguments: the chart instance and the new size.
<ide>
<ide> ## Important Note | 1 |
Javascript | Javascript | fix markdown heading syntax | 186c8cb9e411b893ac31e718d98948d5f448a099 | <ide><path>src/ngAria/aria.js
<ide> * Find out more information about each directive by reading the
<ide> * {@link guide/accessibility ngAria Developer Guide}.
<ide> *
<del> * ##Example
<add> * ## Example
<ide> * Using ngDisabled with ngAria:
<ide> * ```html
<ide> * <md-checkbox ng-disabled="disabled">
<ide> * <md-checkbox ng-disabled="disabled" aria-disabled="true">
<ide> * ```
<ide> *
<del> * ##Disabling Attributes
<add> * ## Disabling Attributes
<ide> * It's possible to disable individual attributes added by ngAria with the
<ide> * {@link ngAria.$ariaProvider#config config} method. For more details, see the
<ide> * {@link guide/accessibility Developer Guide}. | 1 |
PHP | PHP | fix broken options in fixturetask | 77a820751815dc68771587650de407a06c36ca8e | <ide><path>src/Console/Command/Task/FixtureTask.php
<ide> public function getOptionParser() {
<ide> 'short' => 's',
<ide> 'boolean' => true
<ide> ])->addOption('records', [
<del> 'help' => __d('cake_console', 'Used with --count and <name>/all commands to pull [n] records from the live tables, where [n] is either --count or the default of 10.'),
<add> 'help' => __d('cake_console', 'Generate a fixture with records from the non-test database. Used with --count and --conditions to limit which records are added to the fixture.'),
<ide> 'short' => 'r',
<ide> 'boolean' => true
<add> ])->addOption('import-records', [
<add> 'help' => __d('cake_console', 'Set to true to import records from the live table when the generated fixture is used.'),
<add> 'boolean' => true
<ide> ])->addOption('conditions', [
<ide> 'help' => __d('cake_console', 'The SQL snippet to use when importing records.'),
<ide> 'default' => '1=1',
<ide> public function all() {
<ide> $tables = $this->Model->listAll($this->connection, false);
<ide>
<ide> foreach ($tables as $table) {
<del> $model = $this->_modelName($table);
<del> $importOptions = [];
<del> if (!empty($this->params['schema'])) {
<del> $importOptions['schema'] = $model;
<del> }
<del> $this->bake($model, false, $importOptions);
<del> }
<del> }
<del>/**
<del> * Interacts with the User to setup an array of import options. For a fixture.
<del> *
<del> * @param string $modelName Name of model you are dealing with.
<del> * @return array Array of import options.
<del> */
<del> public function importOptions($modelName) {
<del> $options = [];
<del>
<del> if (!empty($this->params['schema'])) {
<del> $options['schema'] = $modelName;
<del> }
<del> if (!empty($this->params['records'])) {
<del> $options['records'] = true;
<del> $options['fromTable'] = true;
<add> $this->main($table);
<ide> }
<del> return $options;
<ide> }
<ide>
<ide> /**
<ide> * Assembles and writes a Fixture file
<ide> *
<ide> * @param string $model Name of model to bake.
<ide> * @param string $useTable Name of table to use.
<del> * @param array $importOptions Options for public $import
<ide> * @return string Baked fixture content
<ide> * @throws \RuntimeException
<ide> */
<del> public function bake($model, $useTable = false, array $importOptions = []) {
<add> public function bake($model, $useTable = false) {
<ide> $table = $schema = $records = $import = $modelImport = null;
<del> $importBits = [];
<ide>
<ide> if (!$useTable) {
<ide> $useTable = Inflector::tableize($model);
<ide> } elseif ($useTable != Inflector::tableize($model)) {
<ide> $table = $useTable;
<ide> }
<ide>
<del> if (!empty($importOptions)) {
<del> if (isset($importOptions['schema'])) {
<del> $modelImport = true;
<del> $importBits[] = "'model' => '{$importOptions['schema']}'";
<del> }
<del> if (isset($importOptions['records'])) {
<del> $importBits[] = "'records' => true";
<del> }
<del> if ($this->connection !== 'default') {
<del> $importBits[] .= "'connection' => '{$this->connection}'";
<del> }
<del> if (!empty($importBits)) {
<del> $import = sprintf("[%s]", implode(', ', $importBits));
<del> }
<add> $importBits = [];
<add> if (!empty($this->params['schema'])) {
<add> $modelImport = true;
<add> $importBits[] = "'model' => '{$model}'";
<add> }
<add> if (!empty($this->params['import-records'])) {
<add> $importBits[] = "'records' => true";
<add> }
<add> if (!empty($importBits) && $this->connection !== 'default') {
<add> $importBits[] = "'connection' => '{$this->connection}'";
<add> }
<add> if (!empty($importBits)) {
<add> $import = sprintf("[%s]", implode(', ', $importBits));
<ide> }
<ide>
<ide> $connection = ConnectionManager::get($this->connection);
<ide> public function bake($model, $useTable = false, array $importOptions = []) {
<ide> $schema = $this->_generateSchema($data);
<ide> }
<ide>
<del> if (empty($importOptions['records']) && !isset($importOptions['fromTable'])) {
<add> if (empty($this->params['records']) && empty($this->params['import-records'])) {
<ide> $recordCount = 1;
<ide> if (isset($this->params['count'])) {
<ide> $recordCount = $this->params['count'];
<ide> }
<ide> $records = $this->_makeRecordString($this->_generateRecords($data, $recordCount));
<ide> }
<del> if (!empty($this->params['records']) || isset($importOptions['fromTable'])) {
<add> if (!empty($this->params['records']) && empty($this->params['import-records'])) {
<ide> $records = $this->_makeRecordString($this->_getRecordsFromTable($model, $useTable));
<ide> }
<ide> return $this->generateFixtureFile($model, compact('records', 'table', 'schema', 'import'));
<ide><path>tests/TestCase/Console/Command/Task/FixtureTaskTest.php
<ide> public function testGetPath() {
<ide> $this->assertPathEquals(ROOT . '/tests/Fixture/', $this->Task->getPath());
<ide> }
<ide>
<del>/**
<del> * test importOptions with overwriting command line options.
<del> *
<del> * @return void
<del> */
<del> public function testImportOptionsWithCommandLineOptions() {
<del> $this->Task->params = ['schema' => true, 'records' => true];
<del>
<del> $result = $this->Task->importOptions('Article');
<del> $expected = ['fromTable' => true, 'schema' => 'Article', 'records' => true];
<del> $this->assertEquals($expected, $result);
<del> }
<del>
<del>/**
<del> * test importOptions with schema.
<del> *
<del> * @return void
<del> */
<del> public function testImportOptionsWithSchema() {
<del> $this->Task->params = ['schema' => true];
<del>
<del> $result = $this->Task->importOptions('Articles');
<del> $expected = ['schema' => 'Articles'];
<del> $this->assertEquals($expected, $result);
<del> }
<del>
<del>/**
<del> * test importOptions with records.
<del> *
<del> * @return void
<del> */
<del> public function testImportOptionsWithRecords() {
<del> $this->Task->params = array('records' => true);
<del>
<del> $result = $this->Task->importOptions('Article');
<del> $expected = array('fromTable' => true, 'records' => true);
<del> $this->assertEquals($expected, $result);
<del> }
<del>
<ide> /**
<ide> * test generating a fixture with database conditions.
<ide> *
<ide> * @return void
<ide> */
<ide> public function testImportRecordsFromDatabaseWithConditionsPoo() {
<ide> $this->Task->connection = 'test';
<add> $this->Task->params = ['schema' => true, 'records' => true];
<ide>
<del> $result = $this->Task->bake('Articles', false, array(
<del> 'fromTable' => true,
<del> 'schema' => 'Articles',
<del> 'records' => false
<del> ));
<add> $result = $this->Task->bake('Articles');
<ide>
<ide> $this->assertContains('namespace App\Test\Fixture;', $result);
<ide> $this->assertContains('use Cake\TestSuite\Fixture\TestFixture;', $result);
<ide> public function testImportRecordsFromDatabaseWithConditionsPoo() {
<ide> */
<ide> public function testImportOptionsAlternateConnection() {
<ide> $this->Task->connection = 'test';
<del> $result = $this->Task->bake('Article', false, array('schema' => 'Article'));
<add> $this->Task->params = ['schema' => true];
<add> $result = $this->Task->bake('Article');
<ide> $this->assertContains("'connection' => 'test'", $result);
<ide> }
<ide>
<ide> public function testImportOptionsAlternateConnection() {
<ide> * @return void
<ide> */
<ide> public function testImportRecordsNoEscaping() {
<del> $db = ConnectionManager::get('test');
<del> if ($db instanceof Sqlserver) {
<del> $this->markTestSkipped('This test does not run on SQLServer');
<del> }
<del>
<ide> $articles = TableRegistry::get('Articles');
<ide> $articles->updateAll(['body' => "Body \"value\""], []);
<ide>
<ide> $this->Task->connection = 'test';
<del> $result = $this->Task->bake('Article', false, array(
<del> 'fromTable' => true,
<del> 'schema' => 'Article',
<del> 'records' => false
<del> ));
<add> $this->Task->params = ['schema' => 'true', 'records' => true];
<add> $result = $this->Task->bake('Article');
<ide> $this->assertContains("'body' => 'Body \"value\"'", $result, 'Data has bad escaping');
<ide> }
<ide>
<ide> public function testMainNoArgs() {
<ide> public function testBake() {
<ide> $this->Task->connection = 'test';
<ide>
<del> $result = $this->Task->bake('Article');
<del> $this->assertContains('class ArticleFixture extends TestFixture', $result);
<del> $this->assertContains('public $fields', $result);
<del> $this->assertContains('public $records', $result);
<del> $this->assertNotContains('public $import', $result);
<add> $this->Task->expects($this->at(0))
<add> ->method('createFile')
<add> ->with($this->anything(), $this->logicalAnd(
<add> $this->stringContains('class ArticleFixture extends TestFixture'),
<add> $this->stringContains('public $fields'),
<add> $this->stringContains('public $records'),
<add> $this->logicalNot($this->stringContains('public $import'))
<add> ));
<add> $result = $this->Task->main('Article');
<add> }
<ide>
<del> $result = $this->Task->bake('Article', 'comments');
<del> $this->assertContains('class ArticleFixture extends TestFixture', $result);
<del> $this->assertContains('public $table = \'comments\';', $result);
<del> $this->assertContains('public $fields = [', $result);
<add>/**
<add> * test main() with importing records
<add> *
<add> * @return void
<add> */
<add> public function testMainImportRecords() {
<add> $this->Task->connection = 'test';
<add> $this->Task->params = ['import-records' => true];
<add>
<add> $this->Task->expects($this->at(0))
<add> ->method('createFile')
<add> ->with($this->anything(), $this->logicalAnd(
<add> $this->stringContains("public \$import = ['records' => true, 'connection' => 'test'];"),
<add> $this->logicalNot($this->stringContains('public $records'))
<add> ));
<ide>
<del> $result = $this->Task->bake('Article', 'comments', array('records' => true));
<del> $this->assertContains("public \$import = ['records' => true, 'connection' => 'test'];", $result);
<del> $this->assertNotContains('public $records', $result);
<add> $this->Task->main('Article');
<add> }
<ide>
<del> $result = $this->Task->bake('Article', 'comments', array('schema' => 'Article'));
<del> $this->assertContains("public \$import = ['model' => 'Article', 'connection' => 'test'];", $result);
<del> $this->assertNotContains('public $fields', $result);
<add>/**
<add> * test main() with importing schema.
<add> *
<add> * @return void
<add> */
<add> public function testMainImportSchema() {
<add> $this->Task->connection = 'test';
<add> $this->Task->params = ['schema' => true, 'import-records' => true];
<ide>
<del> $result = $this->Task->bake('Article', 'comments', array('schema' => 'Article', 'records' => true));
<del> $this->assertContains("public \$import = ['model' => 'Article', 'records' => true, 'connection' => 'test'];", $result);
<del> $this->assertNotContains('public $fields', $result);
<del> $this->assertNotContains('public $records', $result);
<add> $this->Task->expects($this->once())
<add> ->method('createFile')
<add> ->with($this->anything(), $this->logicalAnd(
<add> $this->stringContains("public \$import = ['model' => 'Article', 'records' => true, 'connection' => 'test'];"),
<add> $this->logicalNot($this->stringContains('public $fields')),
<add> $this->logicalNot($this->stringContains('public $records'))
<add> ));
<add> $this->Task->bake('Article', 'comments');
<ide> }
<ide>
<ide> /** | 2 |
Python | Python | support partial labels with faster r-cnn | fc585957f779c3914f3bb94bdb6403a07a34a32d | <ide><path>research/object_detection/meta_architectures/faster_rcnn_meta_arch.py
<ide> class targets with the 0th index assumed to map to the background class.
<ide> unmatched_class_label=tf.constant(
<ide> [1] + self._num_classes * [0], dtype=tf.float32),
<ide> gt_weights_batch=groundtruth_weights_list)
<del>
<add> if self.groundtruth_has_field(
<add> fields.InputDataFields.groundtruth_labeled_classes):
<add> gt_labeled_classes = self.groundtruth_lists(
<add> fields.InputDataFields.groundtruth_labeled_classes)
<add> gt_labeled_classes = tf.pad(
<add> gt_labeled_classes, [[0, 0], [1, 0]],
<add> mode='CONSTANT',
<add> constant_values=1)
<add> batch_cls_weights *= tf.expand_dims(gt_labeled_classes, 1)
<ide> class_predictions_with_background = tf.reshape(
<ide> class_predictions_with_background,
<ide> [batch_size, self.max_num_proposals, -1]) | 1 |
Ruby | Ruby | link rich text to owning record | 69daf8f21dbceaa2414d17af46c5fd72e8d0050e | <ide><path>app/models/action_text/rich_text.rb
<ide> class ActionText::RichText < ActiveRecord::Base
<ide>
<ide> serialize :body, ActionText::Content
<ide>
<add> belongs_to :record, polymorphic: true, touch: true
<ide> has_many_attached :embeds
<ide>
<ide> after_save do | 1 |
Ruby | Ruby | generate application_mailer.rb if it is missing | 0b3ae023d27197417541932632055cd6be4810c4 | <ide><path>actionmailer/lib/rails/generators/mailer/mailer_generator.rb
<ide> class MailerGenerator < NamedBase
<ide>
<ide> def create_mailer_file
<ide> template "mailer.rb", File.join('app/mailers', class_path, "#{file_name}_mailer.rb")
<add>
<add> unless File.exist?('app/mailers/application_mailer.rb')
<add> template 'application_mailer.rb', 'app/mailers/application_mailer.rb'
<add> end
<ide> end
<ide>
<ide> hook_for :template_engine, :test_framework
<ide><path>actionmailer/lib/rails/generators/mailer/templates/application_mailer.rb
<add>class ApplicationMailer < ActionMailer::Base
<add> default from: 'from@example.com'
<add> layout :mailer
<add>end
<ide><path>railties/test/generators/mailer_generator_test.rb
<ide> def test_mailer_skeleton_is_created
<ide> assert_no_match(/default from: "from@example.com"/, mailer)
<ide> assert_no_match(/layout :mailer_notifier/, mailer)
<ide> end
<add>
<add> assert_file 'app/mailers/application_mailer.rb' do |mailer|
<add> assert_match(/class ApplicationMailer < ActionMailer::Base/, mailer)
<add> assert_match(/default from: 'from@example.com'/, mailer)
<add> assert_match(/layout :mailer/, mailer)
<add> end
<ide> end
<ide>
<ide> def test_mailer_with_i18n_helper | 3 |
Javascript | Javascript | add tests for search bar | 2b5268305a280c12e9167d89c359b4915d2988a3 | <ide><path>client/src/components/search/searchBar/SearchBar.js
<ide> const mapDispatchToProps = dispatch =>
<ide>
<ide> const placeholder = 'Search 5,000+ tutorials';
<ide>
<del>class SearchBar extends Component {
<add>export class SearchBar extends Component {
<ide> constructor(props) {
<ide> super(props);
<ide>
<ide> class SearchBar extends Component {
<ide> }
<ide>
<ide> componentDidMount() {
<del> const searchInput = document.querySelector('.ais-SearchBox-input');
<del> searchInput.id = 'fcc_instantsearch';
<ide> document.addEventListener('click', this.handleFocus);
<ide> }
<ide>
<ide><path>client/src/components/search/searchBar/SearchBar.test.js
<add>/* global jest, expect */
<add>import React from 'react';
<add>import 'jest-dom/extend-expect';
<add>import ShallowRenderer from 'react-test-renderer/shallow';
<add>
<add>import { SearchBar } from './SearchBar';
<add>
<add>describe('<SearchBar />', () => {
<add> it('renders to the DOM', () => {
<add> const shallow = new ShallowRenderer();
<add> shallow.render(<SearchBar {...searchBarProps} />);
<add> const result = shallow.getRenderOutput();
<add> expect(result).toBeTruthy();
<add> });
<add>
<add> /* Todo: When e2e testing is in place,
<add> add tests to check that the search bar
<add> resets to -1 on change/input, redirects to a
<add> selected hit, and redirects to dev news if
<add> there's a query and no hit is selected */
<add>});
<add>
<add>const searchBarProps = {
<add> toggleSearchDropdown: jest.fn(),
<add> toggleSearchFocused: jest.fn(),
<add> updateSearchQuery: jest.fn()
<add>}; | 2 |
Javascript | Javascript | improve aces filmic tone mapping | 74533eefd3ded6327ca08c3cae019947861a7f43 | <ide><path>src/renderers/shaders/ShaderChunk/tonemapping_pars_fragment.glsl.js
<ide> vec3 OptimizedCineonToneMapping( vec3 color ) {
<ide>
<ide> }
<ide>
<del>// source: https://knarkowicz.wordpress.com/2016/01/06/aces-filmic-tone-mapping-curve/
<add>// source: https://github.com/selfshadow/ltc_code/blob/master/webgl/shaders/ltc/ltc_blit.fs
<add>vec3 RRTAndODTFit( vec3 v ) {
<add>
<add> vec3 a = v * ( v + 0.0245786 ) - 0.000090537;
<add> vec3 b = v * ( 0.983729 * v + 0.4329510 ) + 0.238081;
<add> return a / b;
<add>
<add>}
<add>
<ide> vec3 ACESFilmicToneMapping( vec3 color ) {
<ide>
<del> color *= toneMappingExposure;
<del> return saturate( ( color * ( 2.51 * color + 0.03 ) ) / ( color * ( 2.43 * color + 0.59 ) + 0.14 ) );
<add> // sRGB => XYZ => D65_2_D60 => AP1 => RRT_SAT
<add> const mat3 ACESInputMat = mat3(
<add> vec3( 0.59719, 0.07600, 0.02840 ), // transposed from source
<add> vec3( 0.35458, 0.90834, 0.13383 ),
<add> vec3( 0.04823, 0.01566, 0.83777 )
<add> );
<add>
<add> // ODT_SAT => XYZ => D60_2_D65 => sRGB
<add> const mat3 ACESOutputMat = mat3(
<add> vec3( 1.60475, -0.10208, -0.00327 ), // transposed from source
<add> vec3( -0.53108, 1.10813, -0.07276 ),
<add> vec3( -0.07367, -0.00605, 1.07602 )
<add> );
<add>
<add> color *= toneMappingExposure;
<add>
<add> color = ACESInputMat * color;
<add>
<add> // Apply RRT and ODT
<add> color = RRTAndODTFit( color );
<add>
<add> color = ACESOutputMat * color;
<add>
<add> // Clamp to [0, 1]
<add> return saturate( color );
<ide>
<ide> }
<ide> `; | 1 |
PHP | PHP | add missing "use" statement | 6156ce17cabb8dad0e1b7173671c2c50301dfbb9 | <ide><path>src/Http/Exception/BadRequestException.php
<ide> */
<ide> namespace Cake\Http\Exception;
<ide>
<add>use Throwable;
<add>
<ide> /**
<ide> * Represents an HTTP 400 error.
<ide> */
<ide><path>src/Http/Exception/ConflictException.php
<ide> */
<ide> namespace Cake\Http\Exception;
<ide>
<add>use Throwable;
<add>
<ide> /**
<ide> * Represents an HTTP 409 error.
<ide> */
<ide><path>src/Http/Exception/ForbiddenException.php
<ide> */
<ide> namespace Cake\Http\Exception;
<ide>
<add>use Throwable;
<add>
<ide> /**
<ide> * Represents an HTTP 403 error.
<ide> */
<ide><path>src/Http/Exception/GoneException.php
<ide> */
<ide> namespace Cake\Http\Exception;
<ide>
<add>use Throwable;
<add>
<ide> /**
<ide> * Represents an HTTP 410 error.
<ide> */
<ide><path>src/Http/Exception/InternalErrorException.php
<ide> */
<ide> namespace Cake\Http\Exception;
<ide>
<add>use Throwable;
<add>
<ide> /**
<ide> * Represents an HTTP 500 error.
<ide> */
<ide><path>src/Http/Exception/InvalidCsrfTokenException.php
<ide> */
<ide> namespace Cake\Http\Exception;
<ide>
<add>use Throwable;
<add>
<ide> /**
<ide> * Represents an HTTP 403 error caused by an invalid CSRF token
<ide> */
<ide><path>src/Http/Exception/MethodNotAllowedException.php
<ide> */
<ide> namespace Cake\Http\Exception;
<ide>
<add>use Throwable;
<add>
<ide> /**
<ide> * Represents an HTTP 405 error.
<ide> */
<ide><path>src/Http/Exception/NotAcceptableException.php
<ide> */
<ide> namespace Cake\Http\Exception;
<ide>
<add>use Throwable;
<add>
<ide> /**
<ide> * Represents an HTTP 406 error.
<ide> */
<ide><path>src/Http/Exception/NotFoundException.php
<ide> */
<ide> namespace Cake\Http\Exception;
<ide>
<add>use Throwable;
<add>
<ide> /**
<ide> * Represents an HTTP 404 error.
<ide> */
<ide><path>src/Http/Exception/ServiceUnavailableException.php
<ide> */
<ide> namespace Cake\Http\Exception;
<ide>
<add>use Throwable;
<add>
<ide> /**
<ide> * Represents an HTTP 503 error.
<ide> */
<ide><path>src/Http/Exception/UnauthorizedException.php
<ide> */
<ide> namespace Cake\Http\Exception;
<ide>
<add>use Throwable;
<add>
<ide> /**
<ide> * Represents an HTTP 401 error.
<ide> */
<ide><path>src/Http/Exception/UnavailableForLegalReasonsException.php
<ide> */
<ide> namespace Cake\Http\Exception;
<ide>
<add>use Throwable;
<add>
<ide> /**
<ide> * Represents an HTTP 451 error.
<ide> */ | 12 |
Text | Text | add the closed parenthesis. | e4acd7db591c9cd77d960eab6ec60ab3361062df | <ide><path>readme.md
<ide> Next.js can be deployed to other hosting solutions too. Please have a look at th
<ide>
<ide> Note: `NODE_ENV` is properly configured by the `next` subcommands, if absent, to maximize performance. if you’re using Next.js [programmatically](#custom-server-and-routing), it’s your responsibility to set `NODE_ENV=production` manually!
<ide>
<del>Note: we recommend putting `.next`, or your custom dist folder (Please have a look at ['Custom Config'](https://github.com/zeit/next.js#custom-configuration). You can set a custom folder in config, `.npmignore`, or `.gitignore`. Otherwise, use `files` or `now.files` to opt-into a whitelist of files you want to deploy (and obviously exclude `.next` or your custom dist folder).
<add>Note: we recommend putting `.next`, or your custom dist folder (Please have a look at ['Custom Config'](https://github.com/zeit/next.js#custom-configuration)). You can set a custom folder in config, `.npmignore`, or `.gitignore`. Otherwise, use `files` or `now.files` to opt-into a whitelist of files you want to deploy (and obviously exclude `.next` or your custom dist folder).
<ide>
<ide> ## Static HTML export
<ide> | 1 |
Text | Text | use correct code block fences | 193f727ba7fec70781ca389c86f2595ade935621 | <ide><path>docs/creating-a-package.md
<ide> snippets match a scope with the same specificity.
<ide> If you're developing a new language grammar, you'll want to place your file in
<ide> the _grammars_ directory. Each grammar is a pairing of two keys, `match` and
<ide> `captures`. `match` is a regular expression identifying the pattern to highlight,
<del>while `captures` is a JSON representing what to do with each matching group.
<add>while `captures` is an object representing what to do with each matching group.
<add>
<ide> For example:
<ide>
<ide>
<del>```json
<add>```coffeescript
<ide> {
<ide> 'match': '(?:^|\\s)(__[^_]+__)'
<ide> 'captures':
<ide> This indicates that the first matching capture (`(__[^_]+__)`) should have the
<ide>
<ide> To capture a single group, simply use the `name` key instead:
<ide>
<del>```json
<add>```coffeescript
<ide> {
<ide> 'match': '^#{1,6}\\s+.+$'
<ide> 'name': 'markup.heading.gfm'
<ide> More information about the significance of these tokens can be found in
<ide> Your grammar should also include a `filetypes` array, which is a list of file extensions
<ide> your grammar supports:
<ide>
<del>```
<add>```coffeescript
<ide> 'fileTypes': [
<ide> 'markdown'
<ide> 'md'
<ide> your grammar supports:
<ide> Your package **should** have tests, and if they're placed in the _spec_ directory,
<ide> they can be run by Atom.
<ide>
<del>Under the hood, [Jasmine](https://github.com/pivotal/jasmine) is being used to run
<del>to execute the tests, so you can assume that any DSL available there is available
<add>Under the hood, [Jasmine](https://github.com/pivotal/jasmine) is being used to
<add>execute the tests, so you can assume that any DSL available there is available
<ide> to your package as well.
<ide>
<ide> # Full Example
<ide> our keybinding executes a new command called `magic`.
<ide>
<ide> _keymaps/changer.cson_ can easily become this:
<ide>
<del>```cson
<add>```coffeescript
<ide> '.tree-view-scroller':
<ide> 'ctrl-V': 'changer:magic'
<ide> ``` | 1 |
Text | Text | fix examples in repl.md | 8092fb0a59927159ec68c914771c6be484116203 | <ide><path>doc/api/repl.md
<ide> replServer.defineCommand('sayhello', {
<ide> this.displayPrompt();
<ide> }
<ide> });
<del>replServer.defineCommand('saybye', () => {
<add>replServer.defineCommand('saybye', function saybye() {
<ide> console.log('Goodbye!');
<ide> this.close();
<ide> });
<ide> without passing any arguments (or by passing the `-i` argument):
<ide> ```js
<ide> $ node
<ide> > const a = [1, 2, 3];
<add>undefined
<add>> a
<ide> [ 1, 2, 3 ]
<ide> > a.forEach((v) => {
<ide> ... console.log(v); | 1 |
Java | Java | reset view tag when recycling | bed977b17888ea79465f5060443add7d9d7d23d0 | <ide><path>ReactAndroid/src/main/java/com/facebook/react/uimanager/BaseViewManager.java
<ide> @Override
<ide> protected T prepareToRecycleView(@NonNull ThemedReactContext reactContext, T view) {
<ide> // Reset tags
<add> view.setTag(null);
<ide> view.setTag(R.id.pointer_enter, null);
<ide> view.setTag(R.id.pointer_leave, null);
<ide> view.setTag(R.id.pointer_move, null); | 1 |
Ruby | Ruby | remove unused string substitution | 12f5158f098cdc714e826bfb0d3f722a1e9753c8 | <ide><path>activerecord/lib/active_record/connection_adapters/sqlite_adapter.rb
<ide> def empty_insert_statement_value
<ide>
<ide> protected
<ide> def select(sql, name = nil, binds = []) #:nodoc:
<del> result = exec_query(sql, name, binds)
<del> columns = result.columns.map { |column|
<del> column.sub(/^"?\w+"?\./, '')
<del> }
<del>
<del> result.rows.map { |row| Hash[columns.zip(row)] }
<add> exec_query(sql, name, binds).to_a
<ide> end
<ide>
<ide> def table_structure(table_name) | 1 |
PHP | PHP | avoid paginate count when no results | 88240b28748ecb96d5ecf63fa84dc7432461ad8d | <ide><path>lib/Cake/Controller/Component/PaginatorComponent.php
<ide> public function paginate($object = null, $scope = array(), $whitelist = array())
<ide> $defaults = $this->getDefaults($object->alias);
<ide> unset($defaults[0]);
<ide>
<del> if ($object->hasMethod('paginateCount')) {
<add> if (!$results) {
<add> $count = 0;
<add> } elseif ($object->hasMethod('paginateCount')) {
<ide> $count = $object->paginateCount($conditions, $recursive, $extra);
<ide> } else {
<ide> $parameters = compact('conditions'); | 1 |
Java | Java | add timeout and unit to timeoutexception message | 6126752a3a26e084af4cd0e096d77b94ce2a8f94 | <ide><path>src/main/java/io/reactivex/internal/observers/BlockingMultiObserver.java
<ide> import io.reactivex.disposables.Disposable;
<ide> import io.reactivex.internal.util.*;
<ide>
<add>import static io.reactivex.internal.util.ExceptionHelper.timeoutMessage;
<add>
<ide> /**
<ide> * A combined Observer that awaits the success or error signal via a CountDownLatch.
<ide> * @param <T> the value type
<ide> public Throwable blockingGetError(long timeout, TimeUnit unit) {
<ide> BlockingHelper.verifyNonBlocking();
<ide> if (!await(timeout, unit)) {
<ide> dispose();
<del> throw ExceptionHelper.wrapOrThrow(new TimeoutException());
<add> throw ExceptionHelper.wrapOrThrow(new TimeoutException(timeoutMessage(timeout, unit)));
<ide> }
<ide> } catch (InterruptedException ex) {
<ide> dispose();
<ide><path>src/main/java/io/reactivex/internal/observers/FutureObserver.java
<ide> import io.reactivex.internal.util.BlockingHelper;
<ide> import io.reactivex.plugins.RxJavaPlugins;
<ide>
<add>import static io.reactivex.internal.util.ExceptionHelper.timeoutMessage;
<add>
<ide> /**
<ide> * An Observer + Future that expects exactly one upstream value and provides it
<ide> * via the (blocking) Future API.
<ide> public T get(long timeout, TimeUnit unit) throws InterruptedException, Execution
<ide> if (getCount() != 0) {
<ide> BlockingHelper.verifyNonBlocking();
<ide> if (!await(timeout, unit)) {
<del> throw new TimeoutException();
<add> throw new TimeoutException(timeoutMessage(timeout, unit));
<ide> }
<ide> }
<ide>
<ide><path>src/main/java/io/reactivex/internal/observers/FutureSingleObserver.java
<ide> import io.reactivex.internal.util.BlockingHelper;
<ide> import io.reactivex.plugins.RxJavaPlugins;
<ide>
<add>import static io.reactivex.internal.util.ExceptionHelper.timeoutMessage;
<add>
<ide> /**
<ide> * An Observer + Future that expects exactly one upstream value and provides it
<ide> * via the (blocking) Future API.
<ide> public T get(long timeout, TimeUnit unit) throws InterruptedException, Execution
<ide> if (getCount() != 0) {
<ide> BlockingHelper.verifyNonBlocking();
<ide> if (!await(timeout, unit)) {
<del> throw new TimeoutException();
<add> throw new TimeoutException(timeoutMessage(timeout, unit));
<ide> }
<ide> }
<ide>
<ide><path>src/main/java/io/reactivex/internal/operators/completable/CompletableTimeout.java
<ide> import io.reactivex.disposables.*;
<ide> import io.reactivex.plugins.RxJavaPlugins;
<ide>
<add>import static io.reactivex.internal.util.ExceptionHelper.timeoutMessage;
<add>
<ide> public final class CompletableTimeout extends Completable {
<ide>
<ide> final CompletableSource source;
<ide> public void run() {
<ide> if (once.compareAndSet(false, true)) {
<ide> set.clear();
<ide> if (other == null) {
<del> downstream.onError(new TimeoutException());
<add> downstream.onError(new TimeoutException(timeoutMessage(timeout, unit)));
<ide> } else {
<ide> other.subscribe(new DisposeObserver());
<ide> }
<ide><path>src/main/java/io/reactivex/internal/operators/flowable/FlowableTimeoutTimed.java
<ide> import io.reactivex.internal.subscriptions.*;
<ide> import io.reactivex.plugins.RxJavaPlugins;
<ide>
<add>import static io.reactivex.internal.util.ExceptionHelper.timeoutMessage;
<add>
<ide> public final class FlowableTimeoutTimed<T> extends AbstractFlowableWithUpstream<T, T> {
<ide> final long timeout;
<ide> final TimeUnit unit;
<ide> public void onTimeout(long idx) {
<ide> if (compareAndSet(idx, Long.MAX_VALUE)) {
<ide> SubscriptionHelper.cancel(upstream);
<ide>
<del> downstream.onError(new TimeoutException());
<add> downstream.onError(new TimeoutException(timeoutMessage(timeout, unit)));
<ide>
<ide> worker.dispose();
<ide> }
<ide><path>src/main/java/io/reactivex/internal/operators/observable/ObservableTimeoutTimed.java
<ide> import io.reactivex.internal.disposables.*;
<ide> import io.reactivex.plugins.RxJavaPlugins;
<ide>
<add>import static io.reactivex.internal.util.ExceptionHelper.timeoutMessage;
<add>
<ide> public final class ObservableTimeoutTimed<T> extends AbstractObservableWithUpstream<T, T> {
<ide> final long timeout;
<ide> final TimeUnit unit;
<ide> public void onTimeout(long idx) {
<ide> if (compareAndSet(idx, Long.MAX_VALUE)) {
<ide> DisposableHelper.dispose(upstream);
<ide>
<del> downstream.onError(new TimeoutException());
<add> downstream.onError(new TimeoutException(timeoutMessage(timeout, unit)));
<ide>
<ide> worker.dispose();
<ide> }
<ide><path>src/main/java/io/reactivex/internal/operators/single/SingleTimeout.java
<ide> import io.reactivex.internal.disposables.DisposableHelper;
<ide> import io.reactivex.plugins.RxJavaPlugins;
<ide>
<add>import static io.reactivex.internal.util.ExceptionHelper.timeoutMessage;
<add>
<ide> public final class SingleTimeout<T> extends Single<T> {
<ide>
<ide> final SingleSource<T> source;
<ide> public SingleTimeout(SingleSource<T> source, long timeout, TimeUnit unit, Schedu
<ide> @Override
<ide> protected void subscribeActual(final SingleObserver<? super T> observer) {
<ide>
<del> TimeoutMainObserver<T> parent = new TimeoutMainObserver<T>(observer, other);
<add> TimeoutMainObserver<T> parent = new TimeoutMainObserver<T>(observer, other, timeout, unit);
<ide> observer.onSubscribe(parent);
<ide>
<ide> DisposableHelper.replace(parent.task, scheduler.scheduleDirect(parent, timeout, unit));
<ide> protected void subscribeActual(final SingleObserver<? super T> observer) {
<ide>
<ide> SingleSource<? extends T> other;
<ide>
<add> final long timeout;
<add>
<add> final TimeUnit unit;
<add>
<ide> static final class TimeoutFallbackObserver<T> extends AtomicReference<Disposable>
<ide> implements SingleObserver<T> {
<ide>
<ide> public void onError(Throwable e) {
<ide> }
<ide> }
<ide>
<del> TimeoutMainObserver(SingleObserver<? super T> actual, SingleSource<? extends T> other) {
<add> TimeoutMainObserver(SingleObserver<? super T> actual, SingleSource<? extends T> other, long timeout, TimeUnit unit) {
<ide> this.downstream = actual;
<ide> this.other = other;
<add> this.timeout = timeout;
<add> this.unit = unit;
<ide> this.task = new AtomicReference<Disposable>();
<ide> if (other != null) {
<ide> this.fallback = new TimeoutFallbackObserver<T>(actual);
<ide> public void run() {
<ide> }
<ide> SingleSource<? extends T> other = this.other;
<ide> if (other == null) {
<del> downstream.onError(new TimeoutException());
<add> downstream.onError(new TimeoutException(timeoutMessage(timeout, unit)));
<ide> } else {
<ide> this.other = null;
<ide> other.subscribe(fallback);
<ide><path>src/main/java/io/reactivex/internal/subscribers/FutureSubscriber.java
<ide> import io.reactivex.internal.util.BlockingHelper;
<ide> import io.reactivex.plugins.RxJavaPlugins;
<ide>
<add>import static io.reactivex.internal.util.ExceptionHelper.timeoutMessage;
<add>
<ide> /**
<ide> * A Subscriber + Future that expects exactly one upstream value and provides it
<ide> * via the (blocking) Future API.
<ide> public T get(long timeout, TimeUnit unit) throws InterruptedException, Execution
<ide> if (getCount() != 0) {
<ide> BlockingHelper.verifyNonBlocking();
<ide> if (!await(timeout, unit)) {
<del> throw new TimeoutException();
<add> throw new TimeoutException(timeoutMessage(timeout, unit));
<ide> }
<ide> }
<ide>
<ide><path>src/main/java/io/reactivex/internal/util/ExceptionHelper.java
<ide> package io.reactivex.internal.util;
<ide>
<ide> import java.util.*;
<add>import java.util.concurrent.TimeUnit;
<ide> import java.util.concurrent.atomic.AtomicReference;
<ide>
<ide> import io.reactivex.exceptions.CompositeException;
<ide> public static <E extends Throwable> Exception throwIfThrowable(Throwable e) thro
<ide> throw (E)e;
<ide> }
<ide>
<add> public static String timeoutMessage(long timeout, TimeUnit unit) {
<add> return "The source did not signal an event for "
<add> + timeout
<add> + " "
<add> + unit.toString().toLowerCase()
<add> + " and has been terminated.";
<add> }
<add>
<ide> static final class Termination extends Throwable {
<ide>
<ide> private static final long serialVersionUID = -4649703670690200604L;
<ide><path>src/test/java/io/reactivex/internal/observers/BlockingMultiObserverTest.java
<ide>
<ide> package io.reactivex.internal.observers;
<ide>
<add>import static io.reactivex.internal.util.ExceptionHelper.timeoutMessage;
<ide> import static org.junit.Assert.*;
<ide>
<ide> import java.util.concurrent.TimeUnit;
<add>import java.util.concurrent.TimeoutException;
<ide>
<ide> import org.junit.Test;
<ide>
<ide> public void run() {
<ide>
<ide> assertTrue(bmo.blockingGetError(1, TimeUnit.MINUTES) instanceof TestException);
<ide> }
<add>
<add> @Test
<add> public void blockingGetErrorTimedOut() {
<add> final BlockingMultiObserver<Integer> bmo = new BlockingMultiObserver<Integer>();
<add>
<add> try {
<add> assertNull(bmo.blockingGetError(1, TimeUnit.NANOSECONDS));
<add> fail("Should have thrown");
<add> } catch (RuntimeException expected) {
<add> assertEquals(TimeoutException.class, expected.getCause().getClass());
<add> assertEquals(timeoutMessage(1, TimeUnit.NANOSECONDS), expected.getCause().getMessage());
<add> }
<add> }
<ide> }
<ide><path>src/test/java/io/reactivex/internal/observers/FutureObserverTest.java
<ide>
<ide> package io.reactivex.internal.observers;
<ide>
<add>import static io.reactivex.internal.util.ExceptionHelper.timeoutMessage;
<ide> import static org.junit.Assert.*;
<ide>
<ide> import java.util.*;
<ide> public void run() {
<ide>
<ide> assertEquals(1, fo.get().intValue());
<ide> }
<add>
<add> @Test
<add> public void getTimedOut() throws Exception {
<add> try {
<add> fo.get(1, TimeUnit.NANOSECONDS);
<add> fail("Should have thrown");
<add> } catch (TimeoutException expected) {
<add> assertEquals(timeoutMessage(1, TimeUnit.NANOSECONDS), expected.getMessage());
<add> }
<add> }
<ide> }
<ide><path>src/test/java/io/reactivex/internal/observers/FutureSingleObserverTest.java
<ide>
<ide> package io.reactivex.internal.observers;
<ide>
<add>import static io.reactivex.internal.util.ExceptionHelper.timeoutMessage;
<ide> import static org.junit.Assert.*;
<ide>
<ide> import java.util.concurrent.*;
<ide> public void timeout() throws Exception {
<ide> try {
<ide> f.get(100, TimeUnit.MILLISECONDS);
<ide> fail("Should have thrown");
<del> } catch (TimeoutException ex) {
<del> // expected
<add> } catch (TimeoutException expected) {
<add> assertEquals(timeoutMessage(100, TimeUnit.MILLISECONDS), expected.getMessage());
<ide> }
<ide> }
<ide>
<ide><path>src/test/java/io/reactivex/internal/operators/completable/CompletableTimeoutTest.java
<ide>
<ide> package io.reactivex.internal.operators.completable;
<ide>
<add>import static io.reactivex.internal.util.ExceptionHelper.timeoutMessage;
<ide> import static org.junit.Assert.*;
<ide>
<ide> import java.util.List;
<ide> public void timeoutException() throws Exception {
<ide> .timeout(100, TimeUnit.MILLISECONDS, Schedulers.io())
<ide> .test()
<ide> .awaitDone(5, TimeUnit.SECONDS)
<del> .assertFailure(TimeoutException.class);
<add> .assertFailureAndMessage(TimeoutException.class, timeoutMessage(100, TimeUnit.MILLISECONDS));
<ide> }
<ide>
<ide> @Test
<ide><path>src/test/java/io/reactivex/internal/operators/flowable/FlowableTimeoutTests.java
<ide>
<ide> package io.reactivex.internal.operators.flowable;
<ide>
<add>import static io.reactivex.internal.util.ExceptionHelper.timeoutMessage;
<ide> import static org.junit.Assert.*;
<ide> import static org.mockito.ArgumentMatchers.*;
<ide> import static org.mockito.Mockito.*;
<ide> public void shouldNotTimeoutIfSecondOnNextWithinTimeout() {
<ide>
<ide> @Test
<ide> public void shouldTimeoutIfOnNextNotWithinTimeout() {
<del> Subscriber<String> subscriber = TestHelper.mockSubscriber();
<del> TestSubscriber<String> ts = new TestSubscriber<String>(subscriber);
<add> TestSubscriber<String> subscriber = new TestSubscriber<String>();
<ide>
<del> withTimeout.subscribe(ts);
<add> withTimeout.subscribe(subscriber);
<ide>
<ide> testScheduler.advanceTimeBy(TIMEOUT + 1, TimeUnit.SECONDS);
<del> verify(subscriber).onError(any(TimeoutException.class));
<del> ts.dispose();
<add> subscriber.assertFailureAndMessage(TimeoutException.class, timeoutMessage(TIMEOUT, TIME_UNIT));
<ide> }
<ide>
<ide> @Test
<ide> public void shouldTimeoutIfSecondOnNextNotWithinTimeout() {
<del> Subscriber<String> subscriber = TestHelper.mockSubscriber();
<add> TestSubscriber<String> subscriber = new TestSubscriber<String>();
<ide> TestSubscriber<String> ts = new TestSubscriber<String>(subscriber);
<ide> withTimeout.subscribe(subscriber);
<ide> testScheduler.advanceTimeBy(2, TimeUnit.SECONDS);
<ide> underlyingSubject.onNext("One");
<del> verify(subscriber).onNext("One");
<add> subscriber.assertValue("One");
<ide> testScheduler.advanceTimeBy(TIMEOUT + 1, TimeUnit.SECONDS);
<del> verify(subscriber).onError(any(TimeoutException.class));
<add> subscriber.assertFailureAndMessage(TimeoutException.class, timeoutMessage(TIMEOUT, TIME_UNIT), "One");
<ide> ts.dispose();
<ide> }
<ide>
<ide> public void shouldTimeoutIfSynchronizedFlowableEmitFirstOnNextNotWithinTimeout()
<ide> final CountDownLatch exit = new CountDownLatch(1);
<ide> final CountDownLatch timeoutSetuped = new CountDownLatch(1);
<ide>
<del> final Subscriber<String> subscriber = TestHelper.mockSubscriber();
<del> final TestSubscriber<String> ts = new TestSubscriber<String>(subscriber);
<add> final TestSubscriber<String> subscriber = new TestSubscriber<String>();
<ide>
<ide> new Thread(new Runnable() {
<ide>
<ide> public void subscribe(Subscriber<? super String> subscriber) {
<ide> }
<ide>
<ide> }).timeout(1, TimeUnit.SECONDS, testScheduler)
<del> .subscribe(ts);
<add> .subscribe(subscriber);
<ide> }
<ide> }).start();
<ide>
<ide> timeoutSetuped.await();
<ide> testScheduler.advanceTimeBy(2, TimeUnit.SECONDS);
<ide>
<del> InOrder inOrder = inOrder(subscriber);
<del> inOrder.verify(subscriber, times(1)).onError(isA(TimeoutException.class));
<del> inOrder.verifyNoMoreInteractions();
<add> subscriber.assertFailureAndMessage(TimeoutException.class, timeoutMessage(1, TimeUnit.SECONDS));
<ide>
<ide> exit.countDown(); // exit the thread
<ide> }
<ide> public void subscribe(Subscriber<? super String> subscriber) {
<ide> TestScheduler testScheduler = new TestScheduler();
<ide> Flowable<String> observableWithTimeout = never.timeout(1000, TimeUnit.MILLISECONDS, testScheduler);
<ide>
<del> Subscriber<String> subscriber = TestHelper.mockSubscriber();
<del> TestSubscriber<String> ts = new TestSubscriber<String>(subscriber);
<del> observableWithTimeout.subscribe(ts);
<add> TestSubscriber<String> subscriber = new TestSubscriber<String>();
<add> observableWithTimeout.subscribe(subscriber);
<ide>
<ide> testScheduler.advanceTimeBy(2000, TimeUnit.MILLISECONDS);
<ide>
<del> InOrder inOrder = inOrder(subscriber);
<del> inOrder.verify(subscriber).onError(isA(TimeoutException.class));
<del> inOrder.verifyNoMoreInteractions();
<add> subscriber.assertFailureAndMessage(TimeoutException.class, timeoutMessage(1000, TimeUnit.MILLISECONDS));
<ide>
<ide> verify(s, times(1)).cancel();
<ide> }
<ide> public void run() {
<ide> if (ts.valueCount() != 0) {
<ide> if (ts.errorCount() != 0) {
<ide> ts.assertFailure(TimeoutException.class, 1);
<add> ts.assertErrorMessage(timeoutMessage(1, TimeUnit.SECONDS));
<ide> } else {
<ide> ts.assertValuesOnly(1);
<ide> }
<ide> } else {
<ide> ts.assertFailure(TimeoutException.class);
<add> ts.assertErrorMessage(timeoutMessage(1, TimeUnit.SECONDS));
<ide> }
<ide> }
<ide> }
<ide><path>src/test/java/io/reactivex/internal/operators/observable/ObservableTimeoutTests.java
<ide>
<ide> package io.reactivex.internal.operators.observable;
<ide>
<add>import static io.reactivex.internal.util.ExceptionHelper.timeoutMessage;
<ide> import static org.junit.Assert.*;
<ide> import static org.mockito.ArgumentMatchers.*;
<ide> import static org.mockito.Mockito.*;
<ide> public void shouldNotTimeoutIfSecondOnNextWithinTimeout() {
<ide>
<ide> @Test
<ide> public void shouldTimeoutIfOnNextNotWithinTimeout() {
<del> Observer<String> observer = TestHelper.mockObserver();
<del> TestObserver<String> to = new TestObserver<String>(observer);
<add> TestObserver<String> observer = new TestObserver<String>();
<ide>
<del> withTimeout.subscribe(to);
<add> withTimeout.subscribe(observer);
<ide>
<ide> testScheduler.advanceTimeBy(TIMEOUT + 1, TimeUnit.SECONDS);
<del> verify(observer).onError(any(TimeoutException.class));
<del> to.dispose();
<add> observer.assertFailureAndMessage(TimeoutException.class, timeoutMessage(TIMEOUT, TIME_UNIT));
<ide> }
<ide>
<ide> @Test
<ide> public void shouldTimeoutIfSecondOnNextNotWithinTimeout() {
<del> Observer<String> observer = TestHelper.mockObserver();
<del> TestObserver<String> to = new TestObserver<String>(observer);
<add> TestObserver<String> observer = new TestObserver<String>();
<ide> withTimeout.subscribe(observer);
<ide> testScheduler.advanceTimeBy(2, TimeUnit.SECONDS);
<ide> underlyingSubject.onNext("One");
<del> verify(observer).onNext("One");
<add> observer.assertValue("One");
<ide> testScheduler.advanceTimeBy(TIMEOUT + 1, TimeUnit.SECONDS);
<del> verify(observer).onError(any(TimeoutException.class));
<del> to.dispose();
<add> observer.assertFailureAndMessage(TimeoutException.class, timeoutMessage(TIMEOUT, TIME_UNIT), "One");
<ide> }
<ide>
<ide> @Test
<ide> public void shouldTimeoutIfSynchronizedObservableEmitFirstOnNextNotWithinTimeout
<ide> final CountDownLatch exit = new CountDownLatch(1);
<ide> final CountDownLatch timeoutSetuped = new CountDownLatch(1);
<ide>
<del> final Observer<String> observer = TestHelper.mockObserver();
<del> final TestObserver<String> to = new TestObserver<String>(observer);
<add> final TestObserver<String> observer = new TestObserver<String>();
<ide>
<ide> new Thread(new Runnable() {
<ide>
<ide> public void subscribe(Observer<? super String> observer) {
<ide> }
<ide>
<ide> }).timeout(1, TimeUnit.SECONDS, testScheduler)
<del> .subscribe(to);
<add> .subscribe(observer);
<ide> }
<ide> }).start();
<ide>
<ide> timeoutSetuped.await();
<ide> testScheduler.advanceTimeBy(2, TimeUnit.SECONDS);
<ide>
<del> InOrder inOrder = inOrder(observer);
<del> inOrder.verify(observer, times(1)).onError(isA(TimeoutException.class));
<del> inOrder.verifyNoMoreInteractions();
<add> observer.assertFailureAndMessage(TimeoutException.class, timeoutMessage(1, TimeUnit.SECONDS));
<ide>
<ide> exit.countDown(); // exit the thread
<ide> }
<ide> public void subscribe(Observer<? super String> observer) {
<ide> TestScheduler testScheduler = new TestScheduler();
<ide> Observable<String> observableWithTimeout = never.timeout(1000, TimeUnit.MILLISECONDS, testScheduler);
<ide>
<del> Observer<String> observer = TestHelper.mockObserver();
<del> TestObserver<String> to = new TestObserver<String>(observer);
<del> observableWithTimeout.subscribe(to);
<add> TestObserver<String> observer = new TestObserver<String>();
<add> observableWithTimeout.subscribe(observer);
<ide>
<ide> testScheduler.advanceTimeBy(2000, TimeUnit.MILLISECONDS);
<ide>
<del> InOrder inOrder = inOrder(observer);
<del> inOrder.verify(observer).onError(isA(TimeoutException.class));
<del> inOrder.verifyNoMoreInteractions();
<add> observer.assertFailureAndMessage(TimeoutException.class, timeoutMessage(1000, TimeUnit.MILLISECONDS));
<ide>
<ide> verify(upstream, times(1)).dispose();
<ide> }
<ide> public void run() {
<ide> if (to.valueCount() != 0) {
<ide> if (to.errorCount() != 0) {
<ide> to.assertFailure(TimeoutException.class, 1);
<add> to.assertErrorMessage(timeoutMessage(1, TimeUnit.SECONDS));
<ide> } else {
<ide> to.assertValuesOnly(1);
<ide> }
<ide> } else {
<ide> to.assertFailure(TimeoutException.class);
<add> to.assertErrorMessage(timeoutMessage(1, TimeUnit.SECONDS));
<ide> }
<ide> }
<ide> }
<ide><path>src/test/java/io/reactivex/internal/operators/single/SingleTimeoutTest.java
<ide>
<ide> package io.reactivex.internal.operators.single;
<ide>
<add>import static io.reactivex.internal.util.ExceptionHelper.timeoutMessage;
<ide> import static org.junit.Assert.*;
<ide>
<ide> import java.util.List;
<ide> import java.util.concurrent.TimeUnit;
<add>import java.util.concurrent.TimeoutException;
<ide>
<ide> import org.junit.Test;
<ide>
<ide> public void run() {
<ide> RxJavaPlugins.reset();
<ide> }
<ide> }
<add>
<add> @Test
<add> public void mainTimedOut() {
<add> Single
<add> .never()
<add> .timeout(1, TimeUnit.NANOSECONDS)
<add> .test()
<add> .awaitDone(5, TimeUnit.SECONDS)
<add> .assertFailureAndMessage(TimeoutException.class, timeoutMessage(1, TimeUnit.NANOSECONDS));
<add> }
<ide> }
<ide><path>src/test/java/io/reactivex/internal/subscribers/FutureSubscriberTest.java
<ide>
<ide> package io.reactivex.internal.subscribers;
<ide>
<add>import static io.reactivex.internal.util.ExceptionHelper.timeoutMessage;
<ide> import static org.junit.Assert.*;
<ide>
<ide> import java.util.*;
<ide> public void run() {
<ide>
<ide> assertEquals(1, fs.get().intValue());
<ide> }
<add>
<add> @Test
<add> public void getTimedOut() throws Exception {
<add> try {
<add> fs.get(1, TimeUnit.NANOSECONDS);
<add> fail("Should have thrown");
<add> } catch (TimeoutException expected) {
<add> assertEquals(timeoutMessage(1, TimeUnit.NANOSECONDS), expected.getMessage());
<add> }
<add> }
<ide> } | 17 |
Go | Go | fix faulty errcheck | 639ab92f011245e17e9a293455a8dae1eb034022 | <ide><path>daemon/graphdriver/overlay2/overlay.go
<ide> func (d *Driver) Remove(id string) error {
<ide> }
<ide>
<ide> // Get creates and mounts the required file system for the given id and returns the mount path.
<del>func (d *Driver) Get(id, mountLabel string) (containerfs.ContainerFS, error) {
<add>func (d *Driver) Get(id, mountLabel string) (_ containerfs.ContainerFS, retErr error) {
<ide> d.locker.Lock(id)
<ide> defer d.locker.Unlock(id)
<ide> dir := d.dir(id)
<ide> func (d *Driver) Get(id, mountLabel string) (containerfs.ContainerFS, error) {
<ide> return containerfs.NewLocalContainerFS(mergedDir), nil
<ide> }
<ide> defer func() {
<del> if err != nil {
<add> if retErr != nil {
<ide> if c := d.ctr.Decrement(mergedDir); c <= 0 {
<del> unix.Unmount(mergedDir, 0)
<add> if mntErr := unix.Unmount(mergedDir, 0); mntErr != nil {
<add> logrus.Errorf("error unmounting %v: %v", mergedDir, mntErr)
<add> }
<ide> }
<ide> }
<ide> }() | 1 |
Javascript | Javascript | fix typo in comment (accumlated → accumulated) | 0eea5772486318c5b2922c8b36680cf4744615d6 | <ide><path>packages/react-reconciler/src/ReactFiberBeginWork.new.js
<ide> function updateSimpleMemoComponent(
<ide> // The pending lanes were cleared at the beginning of beginWork. We're
<ide> // about to bail out, but there might be other lanes that weren't
<ide> // included in the current render. Usually, the priority level of the
<del> // remaining updates is accumlated during the evaluation of the
<add> // remaining updates is accumulated during the evaluation of the
<ide> // component (i.e. when processing the update queue). But since since
<ide> // we're bailing out early *without* evaluating the component, we need
<ide> // to account for it here, too. Reset to the value of the current fiber.
<ide><path>packages/react-reconciler/src/ReactFiberBeginWork.old.js
<ide> function updateSimpleMemoComponent(
<ide> // The pending lanes were cleared at the beginning of beginWork. We're
<ide> // about to bail out, but there might be other lanes that weren't
<ide> // included in the current render. Usually, the priority level of the
<del> // remaining updates is accumlated during the evaluation of the
<add> // remaining updates is accumulated during the evaluation of the
<ide> // component (i.e. when processing the update queue). But since since
<ide> // we're bailing out early *without* evaluating the component, we need
<ide> // to account for it here, too. Reset to the value of the current fiber. | 2 |
Javascript | Javascript | add fs#writefilesync to the mock | b3fc64285eae3a510490aeace388b6ec22a11892 | <ide><path>local-cli/util/__mocks__/fs.js
<ide>
<ide> 'use strict';
<ide>
<add>const asyncify = require('async/asyncify');
<ide> const {EventEmitter} = require('events');
<ide> const {dirname} = require.requireActual('path');
<ide> const fs = jest.genMockFromModule('fs');
<add>const invariant = require('fbjs/lib/invariant');
<ide> const path = require('path');
<ide> const stream = require.requireActual('stream');
<ide>
<ide> fs.readFile.mockImplementation(function(filepath, encoding, callback) {
<ide> let node;
<ide> try {
<ide> node = getToNode(filepath);
<del> // dir check
<del> if (node && typeof node === 'object' && node.SYMLINK == null) {
<add> if (isDirNode(node)) {
<ide> callback(new Error('Error readFile a dir: ' + filepath));
<ide> }
<ide> if (node == null) {
<ide> fs.readFile.mockImplementation(function(filepath, encoding, callback) {
<ide>
<ide> fs.readFileSync.mockImplementation(function(filepath, encoding) {
<ide> const node = getToNode(filepath);
<del> // dir check
<del> if (node && typeof node === 'object' && node.SYMLINK == null) {
<add> if (isDirNode(node)) {
<ide> throw new Error('Error readFileSync a dir: ' + filepath);
<ide> }
<ide> return node;
<ide> });
<ide>
<add>fs.writeFile.mockImplementation(asyncify(fs.writeFileSync));
<add>
<add>fs.writeFileSync.mockImplementation((filePath, content, options) => {
<add> if (options == null || typeof options === 'string') {
<add> options = {encoding: options};
<add> }
<add> invariant(
<add> options.encoding == null || options.encoding === 'utf8',
<add> '`options` argument supports only `null` or `"utf8"`',
<add> );
<add> const dirPath = path.dirname(filePath);
<add> const node = getToNode(dirPath);
<add> if (!isDirNode(node)) {
<add> throw fsError('ENOTDIR', 'not a directory: ' + dirPath);
<add> }
<add> node[path.basename(filePath)] = content;
<add>});
<add>
<add>fs.mkdir.mockImplementation(asyncify(fs.mkdirSync));
<add>
<add>fs.mkdirSync.mockImplementation((dirPath, mode) => {
<add> const parentPath = path.dirname(dirPath);
<add> const node = getToNode(parentPath);
<add> if (!isDirNode(node)) {
<add> throw fsError('ENOTDIR', 'not a directory: ' + parentPath);
<add> }
<add> node[path.basename(dirPath)] = {};
<add>});
<add>
<add>function fsError(code, message) {
<add> const error = new Error(code + ': ' + message);
<add> error.code = code;
<add> return error;
<add>}
<add>
<add>function isDirNode(node) {
<add> return node && typeof node === 'object' && node.SYMLINK == null;
<add>}
<add>
<ide> function readlinkSync(filepath) {
<ide> const node = getToNode(filepath);
<ide> if (node !== null && typeof node === 'object' && !!node.SYMLINK) {
<ide> fs.close.mockImplementation((fd, callback = noop) => {
<ide> callback(null);
<ide> });
<ide>
<del>let filesystem;
<add>let filesystem = {};
<ide>
<ide> fs.createReadStream.mockImplementation(filepath => {
<ide> if (!filepath.startsWith('/')) {
<ide><path>local-cli/util/__tests__/fs-mock-test.js
<add>/**
<add> * Copyright (c) 2015-present, Facebook, Inc.
<add> * All rights reserved.
<add> *
<add> * This source code is licensed under the BSD-style license found in the
<add> * LICENSE file in the root directory of this source tree. An additional grant
<add> * of patent rights can be found in the PATENTS file in the same directory.
<add> *
<add> * @emails oncall+javascript_foundation
<add> * @flow
<add> * @format
<add> */
<add>
<add>'use strict';
<add>
<add>/* eslint-disable no-unclear-flowtypes */
<add>
<add>declare var jest: any;
<add>declare var describe: any;
<add>declare var it: any;
<add>
<add>jest.mock('fs');
<add>
<add>const fs = require('fs');
<add>
<add>describe('fs mock', () => {
<add> describe('writeFileSync()', () => {
<add> it('stores content correctly', () => {
<add> fs.writeFileSync('/test', 'foobar', 'utf8');
<add> const content = fs.readFileSync('/test', 'utf8');
<add> expect(content).toEqual('foobar');
<add> });
<add>
<add> it('fails on missing path', () => {
<add> expect(() =>
<add> fs.writeFileSync('/dir/test', 'foobar', 'utf8'),
<add> ).toThrowError('ENOENT: no such file or directory');
<add> });
<add> });
<add>
<add> describe('mkdirSync()', () => {
<add> it('creates folders that we can write files in', () => {
<add> fs.mkdirSync('/dir', 0o777);
<add> fs.writeFileSync('/dir/test', 'foobar', 'utf8');
<add> const content = fs.readFileSync('/dir/test', 'utf8');
<add> expect(content).toEqual('foobar');
<add> });
<add> });
<add>}); | 2 |
Javascript | Javascript | fix a todo and remove obsolete todos | 541d2192cf0325f6b085a33828b40a0e5b23b76a | <ide><path>lib/url.js
<ide> Url.prototype.parse = function parse(url, parseQueryString, slashesDenoteHost) {
<ide> // http://a@b@c/ => user:a@b host:c
<ide> // http://a@b?@c => user:a host:b path:/?@c
<ide>
<del> // v0.12 TODO(isaacs): This is not quite how Chrome does things.
<del> // Review our test case against browsers more comprehensively.
<del>
<ide> var hostEnd = -1;
<ide> var atSign = -1;
<ide> var nonHost = -1;
<ide><path>test/parallel/test-cluster-http-pipe.js
<ide> if (cluster.isMaster) {
<ide> http.createServer(common.mustCall((req, res) => {
<ide> assert.strictEqual(req.connection.remoteAddress, undefined);
<ide> assert.strictEqual(req.connection.localAddress, undefined);
<del> // TODO common.PIPE?
<ide>
<ide> res.writeHead(200);
<ide> res.end('OK');
<ide><path>test/parallel/test-fs-error-messages.js
<ide> if (!common.isAIX) {
<ide> );
<ide> }
<ide>
<del>// copyFile with invalid flags
<add>// Check copyFile with invalid flags.
<ide> {
<del> const validateError = (err) => {
<del> assert.strictEqual(err.message,
<del> 'EINVAL: invalid argument, copyfile ' +
<del> `'${existingFile}' -> '${nonexistentFile}'`);
<del> assert.strictEqual(err.errno, uv.UV_EINVAL);
<del> assert.strictEqual(err.code, 'EINVAL');
<del> assert.strictEqual(err.syscall, 'copyfile');
<del> return true;
<add> const validateError = {
<add> // TODO: Make sure the error message always also contains the src.
<add> message: `EINVAL: invalid argument, copyfile -> '${nonexistentFile}'`,
<add> errno: uv.UV_EINVAL,
<add> code: 'EINVAL',
<add> syscall: 'copyfile'
<ide> };
<ide>
<del> // TODO(joyeecheung): test fs.copyFile() when uv_fs_copyfile does not
<del> // keep the loop open when the flags are invalid.
<del> // See https://github.com/libuv/libuv/pull/1747
<add> fs.copyFile(existingFile, nonexistentFile, -1,
<add> common.expectsError(validateError));
<ide>
<add> validateError.message = 'EINVAL: invalid argument, copyfile ' +
<add> `'${existingFile}' -> '${nonexistentFile}'`;
<ide> assert.throws(
<ide> () => fs.copyFileSync(existingFile, nonexistentFile, -1),
<ide> validateError
<ide><path>test/parallel/test-util-isDeepStrictEqual.js
<ide> notUtilIsDeepStrict([1, , , 3], [1, , , 3, , , ]);
<ide> const err3 = new TypeError('foo1');
<ide> notUtilIsDeepStrict(err1, err2, assert.AssertionError);
<ide> notUtilIsDeepStrict(err1, err3, assert.AssertionError);
<del> // TODO: evaluate if this should throw or not. The same applies for RegExp
<del> // Date and any object that has the same keys but not the same prototype.
<ide> notUtilIsDeepStrict(err1, {}, assert.AssertionError);
<ide> }
<ide> | 4 |
Text | Text | release notes for 1.3.0-rc.2 | 798ed3be213275d22d014b3b57c6c6aff5430c13 | <ide><path>CHANGELOG.md
<del># NOTICE: Pending Breaking Change
<add><a name="1.3.0-rc.2"></a>
<add># 1.3.0-rc.2 tactile-perception (2014-09-16)
<ide>
<del>The next 1.3.0 release candidate (1.3.0-rc.2) will contain a perf-related change that is likely to
<del>introduce breakages in some applications. The change will affect filters and function call
<del>expressions, and will not call the function if the variables passed to the function are primitive
<del>values and have not changed since the last digest loop.
<ide>
<del>Example:
<add>## Bug Fixes
<add>
<add>- **$compile:** update `'@'`-bindings in controller when `bindToController` is `true`
<add> ([e7ac08a0](https://github.com/angular/angular.js/commit/e7ac08a0619d2bdc91c125d341772b4fbc0d5a78),
<add> [#9052](https://github.com/angular/angular.js/issues/9052), [#9077](https://github.com/angular/angular.js/issues/9077))
<add>- **$parse:** ensure CSP assignable expressions have `assign()`
<add> ([d13b4bd1](https://github.com/angular/angular.js/commit/d13b4bd1f5f2abaad00f5d1bf81f79549a8d0e46),
<add> [#9048](https://github.com/angular/angular.js/issues/9048))
<add>- **i18n:** fix typo at i18n generation code
<add> ([eb4afd45](https://github.com/angular/angular.js/commit/eb4afd45f77d7d67744e01ce63a831c13c2b22e8))
<add>- **input:** always pass in the model value to `ctrl.$isEmpty`
<add> ([3e51b84b](https://github.com/angular/angular.js/commit/3e51b84bc19f7e6acc61cb536ddcdbfed307c831),
<add> [#5164](https://github.com/angular/angular.js/issues/5164), [#9017](https://github.com/angular/angular.js/issues/9017))
<add>- **jqLite:** fix `event.stopImmediatePropagation()` so it works as expected
<add> ([30354c58](https://github.com/angular/angular.js/commit/30354c58fe2bd371df364f7a3f55b270692a4051),
<add> [#4833](https://github.com/angular/angular.js/issues/4833))
<add>- **ngLocale:** Regenerate Locale Files
<add> ([6a96a820](https://github.com/angular/angular.js/commit/6a96a8200aff4749bc84c44a1e8018b09d9ebdb4),
<add> [#8931](https://github.com/angular/angular.js/issues/8931), [#8583](https://github.com/angular/angular.js/issues/8583), [#7799](https://github.com/angular/angular.js/issues/7799))
<add>- **ngModel:**
<add> - do not reset bound date objects
<add> ([1a1ef629](https://github.com/angular/angular.js/commit/1a1ef62903c8fdf4ceb81277d966a8eff67f0a96),
<add> [#6666](https://github.com/angular/angular.js/issues/6666))
<add> - don’t clear the model when an external validator failed
<add> ([9314719d](https://github.com/angular/angular.js/commit/9314719d1eb5f480b877f5513f6e0e474edcb67d),
<add> [#8357](https://github.com/angular/angular.js/issues/8357), [#8080](https://github.com/angular/angular.js/issues/8080))
<add>- **ngResource:** make badcfg error message more helpful
<add> ([a3962f0d](https://github.com/angular/angular.js/commit/a3962f0df3f9b8382b47952f9e4fcb48a4cc098b),
<add> [#9005](https://github.com/angular/angular.js/issues/9005), [#9010](https://github.com/angular/angular.js/issues/9010))
<add>- **select:** update option labels when model changes
<add> ([46274102](https://github.com/angular/angular.js/commit/46274102454038ee7fd4543a32166e9bbbc98904),
<add> [#9025](https://github.com/angular/angular.js/issues/9025))
<add>
<add>
<add>## Features
<add>
<add>- **limitTo:** support numeric input to limitTo
<add> ([1c8a7459](https://github.com/angular/angular.js/commit/1c8a7459c90efc77b1a0987f976e3bddab4565fe),
<add> [#8926](https://github.com/angular/angular.js/issues/8926))
<add>- **ngInclude:** add template url parameter to events
<add> ([fd2d6c02](https://github.com/angular/angular.js/commit/fd2d6c02f9654e753d3655a3377a9534f7a54de3),
<add> [#8453](https://github.com/angular/angular.js/issues/8453), [#8454](https://github.com/angular/angular.js/issues/8454))
<ide>
<del>```html
<del>//date filter would only be called if the 'timeCreated' property has changed
<del><span ng-bind="timeCreated|date"></span>
<ide>
<del>//custom filter would break if depends on data changed by user other than 'cost'
<del><span ng-bind="cost|i18nLocalizer">
<add>## Performance Improvements
<add>
<add>- **$compile:** move `$$isolateBinding` creation to directive factory instead of on each link
<add> ([56f09f0b](https://github.com/angular/angular.js/commit/56f09f0b44048b62f964d29db4d3d2630662f6ea))
<add>- **$parse:**
<add> - execute watched expressions only when the inputs change
<add> ([fca6be71](https://github.com/angular/angular.js/commit/fca6be71274e537c7df86ae9e27a3bd1597e9ffa),
<add> [#9006](https://github.com/angular/angular.js/issues/9006), [#9082](https://github.com/angular/angular.js/issues/9082))
<add> - remove `binaryFn` and `valueFn` wrappers from filter expressions
<add> ([67919c80](https://github.com/angular/angular.js/commit/67919c808771a9b185a9d552cd32a90748d36666))
<add>
<add>
<add>## Breaking Changes
<add>
<add>- **$parse:** due to [fca6be71](https://github.com/angular/angular.js/commit/fca6be71274e537c7df86ae9e27a3bd1597e9ffa),
<add> all filters are assumed to be stateless functions
<add>
<add>Previously it was just a good practice to make all filters stateless. Now
<add>it's a requirement in order for the model change-observation to pick up
<add>all changes.
<add>
<add>If an existing filter is statefull, it can be flagged as such but keep in
<add>mind that this will result in a significant performance-penalty (or rather
<add>lost opportunity to benefit from a major perf improvement) that will
<add>affect the `$digest` duration.
<add>
<add>To flag a filter as stateful do the following:
<add>
<add>```javascript
<add>myApp.filter('myFilter', function() {
<add> function myFilter(input) { ... };
<add> myFilter.$stateful = true;
<add> return myFilter;
<add>});
<ide> ```
<ide>
<ide>
<add>
<ide> <a name="1.3.0-rc.1"></a>
<ide> # 1.3.0-rc.1 backyard-atomicity (2014-09-09)
<ide> | 1 |
Javascript | Javascript | update viewconfigignore comment | e254073b1748e99f1a8aa7dfb4535b9b8d8efd3c | <ide><path>Libraries/NativeComponent/ViewConfigIgnore.js
<ide> export function DynamicallyInjectedByGestureHandler<T: {...}>(object: T): T {
<ide> }
<ide>
<ide> /**
<del> * On iOS, ViewManager events declarations generate {eventName}: true entries
<del> * in ViewConfig valueAttributes. In our Static ViewConfig infra, we generate
<del> * these {eventName}: true entries during runtime by inspecting a ViewConfig's
<del> * bubblingEventTypes, and directEventTypes.
<add> * On iOS, ViewManager event declarations generate {eventName}: true entries
<add> * in ViewConfig valueAttributes. These entries aren't generated for Android.
<add> * This annotation allows Static ViewConfigs to insert these entries into
<add> * iOS but not Android.
<ide> *
<del> * However, not all event declarations generate these {eventName}: true entries.
<del> * So, the ViewConfig infra generates extra {eventName}: true entries for some
<del> * events. These extra entries are harmless. So, the logic below makes the ViewConfig
<del> * Validator ignore all extra {eventName}: true entries in static ViewConfig
<del> * validAttributes.
<add> * In the future, we want to remove this platform-inconsistency.
<add> * This annotation also allows us to safely test this removal by setting
<add> * global.RN$ViewConfigEventValidAttributesDisabled = true server-side.
<ide> *
<del> * TODO(T110872225): Remove this logic
<add> * TODO(T110872225): Remove this logic, after achieving platform-consistency
<ide> */
<ide> export function ConditionallyIgnoredEventHandlers<T: {[name: string]: true}>(
<ide> value: T, | 1 |
PHP | PHP | remove some unused variables in functions | da9970de40d99e58843b27e9f01f56bfcf1789ff | <ide><path>src/Illuminate/Foundation/ViewPublisher.php
<ide> public function publish($package, $source)
<ide> */
<ide> public function publishPackage($package, $packagePath = null)
<ide> {
<del> list($vendor, $name) = explode('/', $package);
<del>
<del> $source = $this->getSource($package, $name, $packagePath ?: $this->packagePath);
<add> $source = $this->getSource($package, $packagePath ?: $this->packagePath);
<ide>
<ide> return $this->publish($package, $source);
<ide> }
<ide> public function publishPackage($package, $packagePath = null)
<ide> * Get the source views directory to publish.
<ide> *
<ide> * @param string $package
<del> * @param string $name
<ide> * @param string $packagePath
<ide> * @return string
<ide> *
<ide> * @throws \InvalidArgumentException
<ide> */
<del> protected function getSource($package, $name, $packagePath)
<add> protected function getSource($package, $packagePath)
<ide> {
<ide> $source = $packagePath."/{$package}/src/views";
<ide> | 1 |
Ruby | Ruby | fetch bottles with --retry | 033964fe87afe7ee95837c4e0050d3c6253e03c2 | <ide><path>Library/Homebrew/cmd/pull.rb
<ide> def pull
<ide> "https://api.bintray.com/content/homebrew/#{repo}/#{package}/#{version}/publish"
<ide> puts
<ide> sleep 2
<del> safe_system "brew", "fetch", "--force-bottle", f.name
<add> safe_system "brew", "fetch", "--retry", "--force-bottle", f.name
<ide> end
<ide> else
<ide> opoo "You must set BINTRAY_USER and BINTRAY_KEY to add or update bottles on Bintray!" | 1 |
Text | Text | add a codetriage badge to freecodecamp | c38173c97621b9902ff3583e86eefe1a3712e30c | <ide><path>README.md
<ide> [](https://travis-ci.org/freeCodeCamp/freeCodeCamp)
<ide> [](http://makeapullrequest.com)
<ide> [](http://www.firsttimersonly.com/)
<del>[](https://snyk.io/test/github/freecodecamp/freecodecamp)
<del>
<add>[](https://snyk.io/test/github/freecodecamp/freecodecamp[)
<add>[](https://www.codetriage.com/freecodecamp/freecodecamp)
<ide>
<ide> ## Welcome to freeCodeCamp.org's open source codebase and curriculum!
<ide> | 1 |
Javascript | Javascript | correct typos in the test assertion | 012bda75f1941eb60869be1e4b6606aa40b17024 | <ide><path>test/unit/effects.js
<ide> asyncTest("line-height animates correctly (#13855)", 12, function() {
<ide> initial = initialHeight[ i ],
<ide> height = jQuery( this ).height();
<ide> ok( height < initial, "hide " + label + ": upper bound; height: " + height + "; intitial: " + initial );
<del> ok( height > initial / 3, "hide " + label + ": lower bound; height: " + height + "; intitial/3: " + initial );
<add> ok( height > initial / 2, "hide " + label + ": lower bound; height: " + height + "; intitial/2: " + initial / 2 );
<ide> });
<ide> animated.stop( true, true ).hide().animate( { "line-height": "show" }, 1500 );
<ide> setTimeout(function() { | 1 |
Python | Python | remove use of ``reload`` from f2py. see gh-4139 | 9dbeb2ef07c0afadbe368a7e73baa428b3760816 | <ide><path>numpy/f2py/crackfortran.py
<ide> ignorecontains=1
<ide> dolowercase=1
<ide> debug=[]
<del>## do_analyze = 1
<del>
<del>###### global variables
<del>
<del>## use reload(crackfortran) to reset these variables
<ide>
<add># Global variables
<ide> groupcounter=0
<ide> grouplist={groupcounter:[]}
<ide> neededmodule=-1
<ide> include_paths=[]
<ide> previous_context = None
<ide>
<add>
<add>def reset_global_f2py_vars():
<add> global groupcounter, grouplist, neededmodule, expectbegin, \
<add> skipblocksuntil, usermodules, f90modulevars, gotnextfile, \
<add> filepositiontext, currentfilename, skipfunctions, skipfuncs, \
<add> onlyfuncs, include_paths, previous_context, \
<add> strictf77, sourcecodeform, quiet, verbose, tabchar, pyffilename, \
<add> f77modulename, skipemptyends, ignorecontains, dolowercase, debug
<add>
<add> # flags
<add> strictf77 = 1
<add> sourcecodeform = 'fix'
<add> quiet = 0
<add> verbose = 1
<add> tabchar = 4*' '
<add> pyffilename = ''
<add> f77modulename = ''
<add> skipemptyends = 0
<add> ignorecontains = 1
<add> dolowercase = 1
<add> debug = []
<add> # variables
<add> groupcounter = 0
<add> grouplist = {groupcounter:[]}
<add> neededmodule =-1
<add> expectbegin = 1
<add> skipblocksuntil = -1
<add> usermodules = []
<add> f90modulevars = {}
<add> gotnextfile = 1
<add> filepositiontext = ''
<add> currentfilename = ''
<add> skipfunctions = []
<add> skipfuncs = []
<add> onlyfuncs = []
<add> include_paths = []
<add> previous_context = None
<add>
<add>
<ide> ###### Some helper functions
<ide> def show(o,f=0):pprint.pprint(o)
<ide> errmess=sys.stderr.write
<ide><path>numpy/f2py/f2py2e.py
<ide> def run_main(comline_list):
<ide> """Run f2py as if string.join(comline_list,' ') is used as a command line.
<ide> In case of using -h flag, return None.
<ide> """
<del> if sys.version_info[0] >= 3:
<del> import imp
<del> imp.reload(crackfortran)
<del> else:
<del> reload(crackfortran)
<add> crackfortran.reset_global_f2py_vars()
<ide> f2pydir=os.path.dirname(os.path.abspath(cfuncs.__file__))
<ide> fobjhsrc = os.path.join(f2pydir, 'src', 'fortranobject.h')
<ide> fobjcsrc = os.path.join(f2pydir, 'src', 'fortranobject.c') | 2 |
Ruby | Ruby | fix relocation of duplicate `rpath`s | e8b5eb7e42c925b7cc10c78a029b8c70e4d7965b | <ide><path>Library/Homebrew/extend/os/mac/keg_relocate.rb
<ide> def fixed_name(file, bad_name)
<ide> def each_linkage_for(file, linkage_type, &block)
<ide> links = file.method(linkage_type)
<ide> .call
<add> .uniq
<ide> .reject { |fn| fn =~ /^@(loader_|executable_|r)path/ }
<ide> links.each(&block)
<ide> end | 1 |
Javascript | Javascript | correct fn parameters for js animation | b78b12976a952998216cd895862d388d6a2d56a4 | <ide><path>src/ng/animate.js
<ide> var $AnimateProvider = ['$provide', function($provide) {
<ide> *
<ide> * @description Performs an inline animation on the element which applies the provided to and from CSS styles to the element.
<ide> * If any detected CSS transition, keyframe or JavaScript matches the provided className value, then the animation will take
<del> * on the provided styles. For example, if a transition animation is set for the given className then the provided `from` and
<add> * on the provided styles. For example, if a transition animation is set for the given classNamem, then the provided `from` and
<ide> * `to` styles will be applied alongside the given transition. If the CSS style provided in `from` does not have a corresponding
<ide> * style in `to`, the style in `from` is applied immediately, and no animation is run.
<ide> * If a JavaScript animation is detected then the provided styles will be given in as function parameters into the `animate`
<ide> var $AnimateProvider = ['$provide', function($provide) {
<ide> * ```js
<ide> * ngModule.animation('.my-inline-animation', function() {
<ide> * return {
<del> * animate : function(element, className, from, to, done) {
<del> * //styles
<add> * animate : function(element, from, to, done, options) {
<add> * //animation
<add> * done();
<ide> * }
<ide> * }
<ide> * }); | 1 |
PHP | PHP | increase memory margin | fbd1f7184f672be64b51ccdb9af3b47c0e84baf6 | <ide><path>tests/TestCase/ORM/QueryRegressionTest.php
<ide> public function testFormatResultsMemoryLeak()
<ide> }
<ide> gc_collect_cycles();
<ide> $endMemory = memory_get_usage() / 1024 / 1024;
<del> $this->assertWithinRange($endMemory, $memory, 1.25, 'Memory leak in ResultSet');
<add> $this->assertWithinRange($endMemory, $memory, 1.5, 'Memory leak in ResultSet');
<ide> }
<ide>
<ide> /** | 1 |
PHP | PHP | add tests for new route caching | eca35179ed82a32ba90532ae21a08178581a461b | <ide><path>src/Illuminate/Routing/CompiledRouteCollection.php
<ide> public function getByName($name)
<ide> public function getByAction($action)
<ide> {
<ide> $attributes = collect($this->attributes)->first(function (array $attributes) use ($action) {
<del> return $attributes['action']['controller'] === $action;
<add> if (isset($attributes['action']['controller'])) {
<add> return $attributes['action']['controller'] === $action;
<add> }
<add>
<add> return $attributes['action']['uses'] === $action;
<ide> });
<ide>
<ide> return $attributes ? $this->newRoute($attributes) : null;
<ide><path>tests/Integration/Routing/CompiledRouteCollectionTest.php
<ide> namespace Illuminate\Tests\Routing;
<ide>
<ide> use ArrayIterator;
<add>use Illuminate\Http\Request;
<ide> use Illuminate\Routing\CompiledRouteCollection;
<ide> use Illuminate\Routing\Route;
<add>use Illuminate\Support\Arr;
<ide> use Illuminate\Tests\Integration\IntegrationTest;
<add>use Symfony\Component\HttpKernel\Exception\MethodNotAllowedHttpException;
<add>use Symfony\Component\HttpKernel\Exception\NotFoundHttpException;
<ide>
<ide> class CompiledRouteCollectionTest extends IntegrationTest
<ide> {
<ide> public function testRouteCollectionCanRetrieveByAction()
<ide> {
<ide> $this->routeCollection->add($routeIndex = $this->newRoute('GET', 'foo/index', $action = [
<ide> 'uses' => 'FooController@index',
<del> 'as' => 'route_name',
<ide> ]));
<ide>
<del> $this->assertSame($action, $routeIndex->getAction());
<add> $route = $this->routeCollection->getByAction('FooController@index');
<add>
<add> $this->assertSame($action, Arr::except($routeIndex->getAction(), 'as'));
<add> $this->assertSame($action, Arr::except($route->getAction(), 'as'));
<ide> }
<ide>
<ide> public function testRouteCollectionCanGetIterator()
<ide> public function testRouteCollectionCleansUpOverwrittenRoutes()
<ide> $this->assertEquals($routeB, $this->routeCollection->getByAction('OverwrittenView@view'));
<ide> }
<ide>
<add> public function testMatchingThrowsNotFoundExceptionWhenRouteIsNotFound()
<add> {
<add> $this->routeCollection->add($this->newRoute('GET', '/', ['uses' => 'FooController@index']));
<add>
<add> $this->expectException(NotFoundHttpException::class);
<add>
<add> $this->routeCollection->match(Request::create('/foo'));
<add> }
<add>
<add> public function testMatchingThrowsMethodNotAllowedHttpExceptionWhenMethodIsNotAllowed()
<add> {
<add> $this->routeCollection->add($this->newRoute('POST', '/foo', ['uses' => 'FooController@index']));
<add>
<add> $this->expectException(MethodNotAllowedHttpException::class);
<add>
<add> $this->routeCollection->match(Request::create('/foo'));
<add> }
<add>
<add> public function testSlashPrefixIsProperly()
<add> {
<add> $this->routeCollection->add($this->newRoute('GET', 'foo/bar', ['uses' => 'FooController@index', 'prefix' => '/']));
<add>
<add> $route = $this->routeCollection->getByAction('FooController@index');
<add>
<add> $this->assertEquals('foo/bar', $route->uri());
<add> }
<add>
<add> public function testRouteBindingsAreProperlySaved()
<add> {
<add> $this->routeCollection->add($this->newRoute('GET', 'posts/{post:slug}/show', [
<add> 'uses' => 'FooController@index',
<add> 'prefix' => 'profile/{user:username}',
<add> 'as' => 'foo',
<add> ]));
<add>
<add> $route = $this->routeCollection->getByName('foo');
<add>
<add> $this->assertEquals('profile/{user}/posts/{post}/show', $route->uri());
<add> $this->assertSame(['user' => 'username', 'post' => 'slug'], $route->bindingFields());
<add> }
<add>
<ide> /**
<ide> * Create a new Route object.
<ide> *
<ide><path>tests/Routing/RouteCollectionTest.php
<ide> use ArrayIterator;
<ide> use Illuminate\Routing\Route;
<ide> use Illuminate\Routing\RouteCollection;
<add>use LogicException;
<ide> use PHPUnit\Framework\TestCase;
<ide>
<ide> class RouteCollectionTest extends TestCase
<ide> public function testRouteCollectionCleansUpOverwrittenRoutes()
<ide> $this->assertEquals($routeB, $this->routeCollection->getByName('overwrittenRouteA'));
<ide> $this->assertEquals($routeB, $this->routeCollection->getByAction('OverwrittenView@view'));
<ide> }
<add>
<add> public function testCannotCacheDuplicateRouteNames()
<add> {
<add> $this->routeCollection->add(
<add> new Route('GET', 'users', ['uses' => 'UsersController@index', 'as' => 'users'])
<add> );
<add> $this->routeCollection->add(
<add> new Route('GET', 'users/{user}', ['uses' => 'UsersController@show', 'as' => 'users'])
<add> );
<add>
<add> $this->expectException(LogicException::class);
<add>
<add> $this->routeCollection->compile();
<add> }
<ide> }
<ide><path>tests/Routing/RoutingRouteTest.php
<ide> public function testRoutePrefixing()
<ide> $routes = $routes->getRoutes();
<ide> $routes[0]->prefix('prefix');
<ide> $this->assertSame('prefix', $routes[0]->uri());
<add>
<add> /*
<add> * Prefix homepage with empty prefix
<add> */
<add> $router = $this->getRouter();
<add> $router->get('/', function () {
<add> return 'hello';
<add> });
<add> $routes = $router->getRoutes();
<add> $routes = $routes->getRoutes();
<add> $routes[0]->prefix('/');
<add> $this->assertSame('/', $routes[0]->uri());
<ide> }
<ide>
<ide> public function testRoutePreservingOriginalParametersState() | 4 |
Go | Go | fix a typo | 17f39dcb4d33a4cd043949a0142f2e0deb972312 | <ide><path>daemon/images/image_pull.go
<ide> func (i *ImageService) PullImage(ctx context.Context, image, tag string, platfor
<ide> // is a single-arch image, in which case (for backward compatibility),
<ide> // we allow the image to have a non-matching architecture. The code
<ide> // below checks for this situation, and returns a warning to the client,
<del> // as well ass logs it to the daemon logs.
<add> // as well as logging it to the daemon logs.
<ide> img, err := i.GetImage(image, platform)
<ide>
<ide> // Note that this is a special case where GetImage returns both an image | 1 |
PHP | PHP | improve doc comments | f4a260ede060354231e5c78a19202dbde50b6040 | <ide><path>src/Error/ExceptionTrap.php
<ide> * handler to handle fatal errors. When exceptions are trapped
<ide> * they are 'rendered' using the defined renderers and logged
<ide> * if logging is enabled.
<add> *
<add> * Exceptions will be logged, then call attached callbacks
<add> * and finally render an error page using the configured
<add> * `exceptionRenderer`.
<add> *
<add> * If undefined, an ExceptionRenderer will be selected
<add> * based on the current SAPI (CLI or Web).
<ide> */
<ide> class ExceptionTrap
<ide> {
<ide> public function addCallback(Closure $closure)
<ide> public function register(): void
<ide> {
<ide> set_exception_handler([$this, 'handleException']);
<add> // TODO handle fatal errors.
<ide> }
<ide>
<ide> /**
<ide> public function logException(Throwable $exception, ?ServerRequest $request = nul
<ide> /**
<ide> * Trigger an error that occurred during rendering an exception.
<ide> *
<add> * By triggering an E_USER_ERROR we can end up in the default
<add> * exception handling which will log the rendering failure,
<add> * and hopefully render an error page.
<add> *
<ide> * @param \Throwable $exception Exception to log
<ide> * @return void
<ide> */
<ide><path>src/Error/Renderer/TextExceptionRenderer.php
<ide> *
<ide> * Useful in CI or plain text environments.
<ide> *
<del> * @todo 5.0 Implement \Cake\Error\ErrorRendererInterface. This implementation can't implement
<add> * @todo 5.0 Implement \Cake\Error\ExceptionRendererInterface. This implementation can't implement
<ide> * the concrete interface because the return types are not compatible.
<ide> */
<ide> class TextExceptionRenderer
<ide> public function __construct(Throwable $error)
<ide> /**
<ide> * Render an exception into a plain text message.
<ide> *
<del> * @return string
<add> * @return \Psr\Http\Message\ResponseInterface|string
<ide> */
<ide> public function render()
<ide> { | 2 |
Text | Text | add notes about more specific options for axes | 42e85942a852d3082907e0a8eafb75d3b15e554a | <ide><path>docs/axes/cartesian/index.md
<ide> module.exports = {
<ide>
<ide> ## Common Configuration
<ide>
<add>:::tip Note
<add>These are only the common options supported by all cartesian axes. Please see the specific axis documentation for all the available options for that axis.
<add>:::
<add>
<ide> !!!include(axes/cartesian/_common.md)!!!
<ide>
<ide> !!!include(axes/_common.md)!!!
<ide> The `bounds` property controls the scale boundary strategy (bypassed by `min`/`m
<ide>
<ide> ### Tick Configuration
<ide>
<add>:::tip Note
<add>These are only the common tick options supported by all cartesian axes. Please see specific axis documentation for all of the available options for that axis.
<add>:::
<add>
<ide> !!!include(axes/cartesian/_common_ticks.md)!!!
<ide>
<ide> !!!include(axes/_common_ticks.md)!!!
<ide> module.exports = {
<ide>
<ide> :::tip Note
<ide> The `crossAlign` setting is only effective when these preconditions are met:
<del> * tick rotation is `0`
<del> * axis position is `'top'`, '`left'`, `'bottom'` or `'right'`
<add>
<add>* tick rotation is `0`
<add>* axis position is `'top'`, '`left'`, `'bottom'` or `'right'`
<ide> :::
<ide>
<ide> ### Axis ID
<ide><path>docs/axes/index.md
<ide> let chart = new Chart(ctx, {
<ide>
<ide> ## Common Configuration
<ide>
<add>:::tip Note
<add>These are only the common options supported by all axes. Please see specific axis documentation for all of the available options for that axis.
<add>:::
<add>
<ide> !!!include(axes/_common.md)!!!
<ide>
<ide> ## Tick Configuration
<ide>
<add>:::tip Note
<add>These are only the common tick options supported by all axes. Please see specific axis documentation for all of the available tick options for that axis.
<add>:::
<add>
<ide> !!!include(axes/_common_ticks.md)!!!
<ide>
<ide> ## Axis Range Settings | 2 |
Python | Python | add update_zone method to linode dns driver | 7e20b09d6b5b40b5d695b3ada3490e122bd7c872 | <ide><path>libcloud/dns/drivers/linode.py
<ide> def create_zone(self, domain, type='master', ttl=None, extra=None):
<ide> extra=merged, driver=self)
<ide> return zone
<ide>
<add> def update_zone(self, zone, domain=None, type=None, ttl=None, extra=None):
<add> """
<add> Update an existing zone.
<add>
<add> API docs: http://www.linode.com/api/dns/domain.update
<add> """
<add> params = {'api_action': 'domain.update', 'DomainID': zone.id,
<add> 'Type': type}
<add>
<add> if type:
<add> params['Type'] = type
<add>
<add> if domain:
<add> params['Domain'] = domain
<add>
<add> if ttl:
<add> params['TTL_sec'] = ttl
<add>
<add> merged = self._merge_valid_keys(params=params,
<add> valid_keys=VALID_ZONE_EXTRA_PARAMS,
<add> extra=extra)
<add> data = self.connection.request(API_ROOT, params=params).objects[0]
<add> updated_zone = self._get_new_obj(obj=zone, klass=Zone,
<add> attributes={'domain': domain,
<add> 'type': type, 'ttl': ttl,
<add> 'extra': merged})
<add> return updated_zone
<add>
<ide> def create_record(self, name, zone, type, data, extra=None):
<ide> """
<ide> Create a new record.
<ide> def _merge_valid_keys(self, params, valid_keys, extra):
<ide>
<ide> return merged
<ide>
<add> def _get_new_obj(self, obj, klass, attributes):
<add> """
<add> Pass attributes from the existing object 'obj' and attributes
<add> dictionary to a 'klass' constructor.
<add> Attributes from 'attributes' dictionary are only passed to the
<add> constructor if they are not None.
<add> """
<add> kwargs = {}
<add> for key, value in obj.__dict__.items():
<add> if isinstance(value, dict):
<add> kwargs[key] = value.copy()
<add> elif isinstance(value, (tuple, list)):
<add> kwargs[key] = value[:]
<add> else:
<add> kwargs[key] = value
<add>
<add> for key, value in attributes.items():
<add> if value is None:
<add> continue
<add>
<add> if isinstance(value, dict):
<add> kwargs_value = kwargs.get(key, {})
<add> for key1, value2 in value.items():
<add> if value2 is None:
<add> continue
<add>
<add> kwargs_value[key1] = value2
<add> kwargs[key] = kwargs_value
<add> else:
<add> kwargs[key] = value
<add>
<add> return klass(**kwargs)
<add>
<ide> def _to_zones(self, items):
<ide> """
<ide> Convert a list of items to the Zone objects.
<ide> def _to_zone(self, item):
<ide> """
<ide> Build an Zone object from the item dictionary.
<ide> """
<del> extra = {'soa_email': item['SOA_EMAIL'], 'status': item['STATUS'],
<add> extra = {'SOA_Email': item['SOA_EMAIL'], 'status': item['STATUS'],
<ide> 'description': item['DESCRIPTION']}
<ide> zone = Zone(id=item['DOMAINID'], domain=item['DOMAIN'],
<ide> type=item['TYPE'], ttl=item['TTL_SEC'], extra=extra, | 1 |
Java | Java | provide a flag to disable xml support | 1e501f2583efd7521fe457453e5f866bcb8c509a | <ide><path>spring-beans/src/main/java/org/springframework/beans/PropertyEditorRegistrySupport.java
<ide> /*
<del> * Copyright 2002-2018 the original author or authors.
<add> * Copyright 2002-2020 the original author or authors.
<ide> *
<ide> * Licensed under the Apache License, Version 2.0 (the "License");
<ide> * you may not use this file except in compliance with the License.
<ide> import org.springframework.beans.propertyeditors.URLEditor;
<ide> import org.springframework.beans.propertyeditors.UUIDEditor;
<ide> import org.springframework.beans.propertyeditors.ZoneIdEditor;
<add>import org.springframework.core.SpringProperties;
<ide> import org.springframework.core.convert.ConversionService;
<ide> import org.springframework.core.io.Resource;
<ide> import org.springframework.core.io.support.ResourceArrayPropertyEditor;
<ide> *
<ide> * @author Juergen Hoeller
<ide> * @author Rob Harrop
<add> * @author Sebastien Deleuze
<ide> * @since 1.2.6
<ide> * @see java.beans.PropertyEditorManager
<ide> * @see java.beans.PropertyEditorSupport#setAsText
<ide> * @see java.beans.PropertyEditorSupport#setValue
<ide> */
<ide> public class PropertyEditorRegistrySupport implements PropertyEditorRegistry {
<ide>
<add> /**
<add> * Boolean flag controlled by a {@code spring.xml.ignore} system property that instructs Spring to
<add> * ignore XML, i.e. to not initialize the XML-related infrastructure.
<add> * <p>The default is "false".
<add> */
<add> private static final boolean shouldIgnoreXml = SpringProperties.getFlag("spring.xml.ignore");
<add>
<add>
<ide> @Nullable
<ide> private ConversionService conversionService;
<ide>
<ide> private void createDefaultEditors() {
<ide> this.defaultEditors.put(Currency.class, new CurrencyEditor());
<ide> this.defaultEditors.put(File.class, new FileEditor());
<ide> this.defaultEditors.put(InputStream.class, new InputStreamEditor());
<del> this.defaultEditors.put(InputSource.class, new InputSourceEditor());
<add> if (!shouldIgnoreXml) {
<add> this.defaultEditors.put(InputSource.class, new InputSourceEditor());
<add> }
<ide> this.defaultEditors.put(Locale.class, new LocaleEditor());
<ide> this.defaultEditors.put(Path.class, new PathEditor());
<ide> this.defaultEditors.put(Pattern.class, new PatternEditor());
<ide><path>spring-core/src/main/java/org/springframework/core/io/support/PropertiesLoaderUtils.java
<ide> import java.util.Enumeration;
<ide> import java.util.Properties;
<ide>
<add>import org.springframework.core.SpringProperties;
<ide> import org.springframework.core.io.Resource;
<ide> import org.springframework.lang.Nullable;
<ide> import org.springframework.util.Assert;
<ide> *
<ide> * @author Juergen Hoeller
<ide> * @author Rob Harrop
<add> * @author Sebastien Deleuze
<ide> * @since 2.0
<ide> * @see PropertiesLoaderSupport
<ide> */
<ide> public abstract class PropertiesLoaderUtils {
<ide>
<ide> private static final String XML_FILE_EXTENSION = ".xml";
<ide>
<add> /**
<add> * Boolean flag controlled by a {@code spring.xml.ignore} system property that instructs Spring to
<add> * ignore XML, i.e. to not initialize the XML-related infrastructure.
<add> * <p>The default is "false".
<add> */
<add> private static final boolean shouldIgnoreXml = SpringProperties.getFlag("spring.xml.ignore");
<add>
<ide>
<ide> /**
<ide> * Load properties from the given EncodedResource,
<ide> static void fillProperties(Properties props, EncodedResource resource, Propertie
<ide> try {
<ide> String filename = resource.getResource().getFilename();
<ide> if (filename != null && filename.endsWith(XML_FILE_EXTENSION)) {
<add> if (shouldIgnoreXml) {
<add> throw new UnsupportedOperationException("XML support disabled");
<add> }
<ide> stream = resource.getInputStream();
<ide> persister.loadFromXml(props, stream);
<ide> }
<ide> public static void fillProperties(Properties props, Resource resource) throws IO
<ide> try (InputStream is = resource.getInputStream()) {
<ide> String filename = resource.getFilename();
<ide> if (filename != null && filename.endsWith(XML_FILE_EXTENSION)) {
<add> if (shouldIgnoreXml) {
<add> throw new UnsupportedOperationException("XML support disabled");
<add> }
<ide> props.loadFromXML(is);
<ide> }
<ide> else {
<ide> public static Properties loadAllProperties(String resourceName, @Nullable ClassL
<ide> ResourceUtils.useCachesIfNecessary(con);
<ide> try (InputStream is = con.getInputStream()) {
<ide> if (resourceName.endsWith(XML_FILE_EXTENSION)) {
<add> if (shouldIgnoreXml) {
<add> throw new UnsupportedOperationException("XML support disabled");
<add> }
<ide> props.loadFromXML(is);
<ide> }
<ide> else {
<ide><path>spring-core/src/main/java/org/springframework/util/DefaultPropertiesPersister.java
<ide> /*
<del> * Copyright 2002-2019 the original author or authors.
<add> * Copyright 2002-2020 the original author or authors.
<ide> *
<ide> * Licensed under the Apache License, Version 2.0 (the "License");
<ide> * you may not use this file except in compliance with the License.
<ide> import java.io.Writer;
<ide> import java.util.Properties;
<ide>
<add>import org.springframework.core.SpringProperties;
<add>
<ide> /**
<ide> * Default implementation of the {@link PropertiesPersister} interface.
<ide> * Follows the native parsing of {@code java.util.Properties}.
<ide> * "defaultEncoding" and "fileEncodings" properties).
<ide> *
<ide> * @author Juergen Hoeller
<add> * @author Sebastien Deleuze
<ide> * @since 10.03.2004
<ide> * @see java.util.Properties
<ide> * @see java.util.Properties#load
<ide> * @see java.util.Properties#store
<ide> */
<ide> public class DefaultPropertiesPersister implements PropertiesPersister {
<ide>
<add> /**
<add> * Boolean flag controlled by a {@code spring.xml.ignore} system property that instructs Spring to
<add> * ignore XML, i.e. to not initialize the XML-related infrastructure.
<add> * <p>The default is "false".
<add> */
<add> private static final boolean shouldIgnoreXml = SpringProperties.getFlag("spring.xml.ignore");
<add>
<add>
<ide> @Override
<ide> public void load(Properties props, InputStream is) throws IOException {
<ide> props.load(is);
<ide> public void store(Properties props, Writer writer, String header) throws IOExcep
<ide>
<ide> @Override
<ide> public void loadFromXml(Properties props, InputStream is) throws IOException {
<add> if (shouldIgnoreXml) {
<add> throw new UnsupportedOperationException("XML support disabled");
<add> }
<ide> props.loadFromXML(is);
<ide> }
<ide>
<ide> @Override
<ide> public void storeToXml(Properties props, OutputStream os, String header) throws IOException {
<add> if (shouldIgnoreXml) {
<add> throw new UnsupportedOperationException("XML support disabled");
<add> }
<ide> props.storeToXML(os, header);
<ide> }
<ide>
<ide> @Override
<ide> public void storeToXml(Properties props, OutputStream os, String header, String encoding) throws IOException {
<add> if (shouldIgnoreXml) {
<add> throw new UnsupportedOperationException("XML support disabled");
<add> }
<ide> props.storeToXML(os, header, encoding);
<ide> }
<ide>
<ide><path>spring-web/src/main/java/org/springframework/http/codec/support/BaseDefaultCodecs.java
<ide> import java.util.List;
<ide> import java.util.Map;
<ide>
<add>import org.springframework.core.SpringProperties;
<ide> import org.springframework.core.codec.AbstractDataBufferDecoder;
<ide> import org.springframework.core.codec.ByteArrayDecoder;
<ide> import org.springframework.core.codec.ByteArrayEncoder;
<ide> */
<ide> class BaseDefaultCodecs implements CodecConfigurer.DefaultCodecs, CodecConfigurer.DefaultCodecConfig {
<ide>
<add> /**
<add> * Boolean flag controlled by a {@code spring.xml.ignore} system property that instructs Spring to
<add> * ignore XML, i.e. to not initialize the XML-related infrastructure.
<add> * <p>The default is "false".
<add> */
<add> private static final boolean shouldIgnoreXml = SpringProperties.getFlag("spring.xml.ignore");
<add>
<ide> static final boolean jackson2Present;
<ide>
<ide> private static final boolean jackson2SmilePresent;
<ide> private void initCodec(@Nullable Object codec) {
<ide> ((AbstractJackson2Decoder) codec).setMaxInMemorySize(size);
<ide> }
<ide> }
<del> if (jaxb2Present) {
<add> if (jaxb2Present && !shouldIgnoreXml) {
<ide> if (codec instanceof Jaxb2XmlDecoder) {
<ide> ((Jaxb2XmlDecoder) codec).setMaxInMemorySize(size);
<ide> }
<ide> final List<HttpMessageReader<?>> getObjectReaders() {
<ide> addCodec(readers, new DecoderHttpMessageReader<>(this.jackson2SmileDecoder != null ?
<ide> (Jackson2SmileDecoder) this.jackson2SmileDecoder : new Jackson2SmileDecoder()));
<ide> }
<del> if (jaxb2Present) {
<add> if (jaxb2Present && !shouldIgnoreXml) {
<ide> addCodec(readers, new DecoderHttpMessageReader<>(this.jaxb2Decoder != null ?
<ide> (Jaxb2XmlDecoder) this.jaxb2Decoder : new Jaxb2XmlDecoder()));
<ide> }
<ide> final List<HttpMessageWriter<?>> getBaseObjectWriters() {
<ide> writers.add(new EncoderHttpMessageWriter<>(this.jackson2SmileEncoder != null ?
<ide> (Jackson2SmileEncoder) this.jackson2SmileEncoder : new Jackson2SmileEncoder()));
<ide> }
<del> if (jaxb2Present) {
<add> if (jaxb2Present && !shouldIgnoreXml) {
<ide> writers.add(new EncoderHttpMessageWriter<>(this.jaxb2Encoder != null ?
<ide> (Jaxb2XmlEncoder) this.jaxb2Encoder : new Jaxb2XmlEncoder()));
<ide> }
<ide><path>spring-web/src/main/java/org/springframework/http/converter/support/AllEncompassingFormHttpMessageConverter.java
<ide> /*
<del> * Copyright 2002-2018 the original author or authors.
<add> * Copyright 2002-2020 the original author or authors.
<ide> *
<ide> * Licensed under the Apache License, Version 2.0 (the "License");
<ide> * you may not use this file except in compliance with the License.
<ide>
<ide> package org.springframework.http.converter.support;
<ide>
<add>import org.springframework.core.SpringProperties;
<ide> import org.springframework.http.converter.FormHttpMessageConverter;
<ide> import org.springframework.http.converter.json.GsonHttpMessageConverter;
<ide> import org.springframework.http.converter.json.JsonbHttpMessageConverter;
<ide> *
<ide> * @author Rossen Stoyanchev
<ide> * @author Juergen Hoeller
<add> * @author Sebastien Deleuze
<ide> * @since 3.2
<ide> */
<ide> public class AllEncompassingFormHttpMessageConverter extends FormHttpMessageConverter {
<ide>
<add> /**
<add> * Boolean flag controlled by a {@code spring.xml.ignore} system property that instructs Spring to
<add> * ignore XML, i.e. to not initialize the XML-related infrastructure.
<add> * <p>The default is "false".
<add> */
<add> private static final boolean shouldIgnoreXml = SpringProperties.getFlag("spring.xml.ignore");
<add>
<ide> private static final boolean jaxb2Present;
<ide>
<ide> private static final boolean jackson2Present;
<ide> public class AllEncompassingFormHttpMessageConverter extends FormHttpMessageConv
<ide>
<ide>
<ide> public AllEncompassingFormHttpMessageConverter() {
<del> try {
<del> addPartConverter(new SourceHttpMessageConverter<>());
<del> }
<del> catch (Error err) {
<del> // Ignore when no TransformerFactory implementation is available
<del> }
<del>
<del> if (jaxb2Present && !jackson2XmlPresent) {
<del> addPartConverter(new Jaxb2RootElementHttpMessageConverter());
<add> if (!shouldIgnoreXml) {
<add> try {
<add> addPartConverter(new SourceHttpMessageConverter<>());
<add> }
<add> catch (Error err) {
<add> // Ignore when no TransformerFactory implementation is available
<add> }
<add>
<add> if (jaxb2Present) {
<add> addPartConverter(new Jaxb2RootElementHttpMessageConverter());
<add> }
<ide> }
<ide>
<ide> if (jackson2Present) {
<ide> else if (jsonbPresent) {
<ide> addPartConverter(new JsonbHttpMessageConverter());
<ide> }
<ide>
<del> if (jackson2XmlPresent) {
<add> if (jackson2XmlPresent && !shouldIgnoreXml) {
<ide> addPartConverter(new MappingJackson2XmlHttpMessageConverter());
<ide> }
<ide>
<ide><path>spring-web/src/main/java/org/springframework/web/client/RestTemplate.java
<ide> import java.util.stream.Stream;
<ide>
<ide> import org.springframework.core.ParameterizedTypeReference;
<add>import org.springframework.core.SpringProperties;
<ide> import org.springframework.http.HttpEntity;
<ide> import org.springframework.http.HttpHeaders;
<ide> import org.springframework.http.HttpMethod;
<ide> * @author Roy Clarkson
<ide> * @author Juergen Hoeller
<ide> * @author Sam Brannen
<add> * @author Sebastien Deleuze
<ide> * @since 3.0
<ide> * @see HttpMessageConverter
<ide> * @see RequestCallback
<ide> */
<ide> public class RestTemplate extends InterceptingHttpAccessor implements RestOperations {
<ide>
<add> /**
<add> * Boolean flag controlled by a {@code spring.xml.ignore} system property that instructs Spring to
<add> * ignore XML, i.e. to not initialize the XML-related infrastructure.
<add> * <p>The default is "false".
<add> */
<add> private static final boolean shouldIgnoreXml = SpringProperties.getFlag("spring.xml.ignore");
<add>
<ide> private static final boolean romePresent;
<ide>
<ide> private static final boolean jaxb2Present;
<ide> public RestTemplate() {
<ide> this.messageConverters.add(new ByteArrayHttpMessageConverter());
<ide> this.messageConverters.add(new StringHttpMessageConverter());
<ide> this.messageConverters.add(new ResourceHttpMessageConverter(false));
<del> try {
<del> this.messageConverters.add(new SourceHttpMessageConverter<>());
<del> }
<del> catch (Error err) {
<del> // Ignore when no TransformerFactory implementation is available
<add> if (!shouldIgnoreXml) {
<add> try {
<add> this.messageConverters.add(new SourceHttpMessageConverter<>());
<add> }
<add> catch (Error err) {
<add> // Ignore when no TransformerFactory implementation is available
<add> }
<ide> }
<ide> this.messageConverters.add(new AllEncompassingFormHttpMessageConverter());
<ide>
<ide> public RestTemplate() {
<ide> this.messageConverters.add(new RssChannelHttpMessageConverter());
<ide> }
<ide>
<del> if (jackson2XmlPresent) {
<del> this.messageConverters.add(new MappingJackson2XmlHttpMessageConverter());
<del> }
<del> else if (jaxb2Present) {
<del> this.messageConverters.add(new Jaxb2RootElementHttpMessageConverter());
<add> if (!shouldIgnoreXml) {
<add> if (jackson2XmlPresent) {
<add> this.messageConverters.add(new MappingJackson2XmlHttpMessageConverter());
<add> }
<add> else if (jaxb2Present) {
<add> this.messageConverters.add(new Jaxb2RootElementHttpMessageConverter());
<add> }
<ide> }
<ide>
<ide> if (jackson2Present) {
<ide><path>spring-webmvc/src/main/java/org/springframework/web/servlet/config/annotation/WebMvcConfigurationSupport.java
<ide> import org.springframework.context.annotation.Bean;
<ide> import org.springframework.context.annotation.Configuration;
<ide> import org.springframework.context.annotation.Lazy;
<add>import org.springframework.core.SpringProperties;
<ide> import org.springframework.core.convert.converter.Converter;
<ide> import org.springframework.format.Formatter;
<ide> import org.springframework.format.FormatterRegistry;
<ide> */
<ide> public class WebMvcConfigurationSupport implements ApplicationContextAware, ServletContextAware {
<ide>
<add> /**
<add> * Boolean flag controlled by a {@code spring.xml.ignore} system property that instructs Spring to
<add> * ignore XML, i.e. to not initialize the XML-related infrastructure.
<add> * <p>The default is "false".
<add> */
<add> private static final boolean shouldIgnoreXml = SpringProperties.getFlag("spring.xml.ignore");
<add>
<ide> private static final boolean romePresent;
<ide>
<ide> private static final boolean jaxb2Present;
<ide> protected Map<String, MediaType> getDefaultMediaTypes() {
<ide> map.put("atom", MediaType.APPLICATION_ATOM_XML);
<ide> map.put("rss", MediaType.APPLICATION_RSS_XML);
<ide> }
<del> if (jaxb2Present || jackson2XmlPresent) {
<add> if (!shouldIgnoreXml && (jaxb2Present || jackson2XmlPresent)) {
<ide> map.put("xml", MediaType.APPLICATION_XML);
<ide> }
<ide> if (jackson2Present || gsonPresent || jsonbPresent) {
<ide> protected final void addDefaultHttpMessageConverters(List<HttpMessageConverter<?
<ide> messageConverters.add(new StringHttpMessageConverter());
<ide> messageConverters.add(new ResourceHttpMessageConverter());
<ide> messageConverters.add(new ResourceRegionHttpMessageConverter());
<del> try {
<del> messageConverters.add(new SourceHttpMessageConverter<>());
<del> }
<del> catch (Throwable ex) {
<del> // Ignore when no TransformerFactory implementation is available...
<add> if (!shouldIgnoreXml) {
<add> try {
<add> messageConverters.add(new SourceHttpMessageConverter<>());
<add> }
<add> catch (Throwable ex) {
<add> // Ignore when no TransformerFactory implementation is available...
<add> }
<ide> }
<ide> messageConverters.add(new AllEncompassingFormHttpMessageConverter());
<ide>
<ide> protected final void addDefaultHttpMessageConverters(List<HttpMessageConverter<?
<ide> messageConverters.add(new RssChannelHttpMessageConverter());
<ide> }
<ide>
<del> if (jackson2XmlPresent) {
<del> Jackson2ObjectMapperBuilder builder = Jackson2ObjectMapperBuilder.xml();
<del> if (this.applicationContext != null) {
<del> builder.applicationContext(this.applicationContext);
<add> if (!shouldIgnoreXml) {
<add> if (jackson2XmlPresent) {
<add> Jackson2ObjectMapperBuilder builder = Jackson2ObjectMapperBuilder.xml();
<add> if (this.applicationContext != null) {
<add> builder.applicationContext(this.applicationContext);
<add> }
<add> messageConverters.add(new MappingJackson2XmlHttpMessageConverter(builder.build()));
<add> }
<add> else if (jaxb2Present) {
<add> messageConverters.add(new Jaxb2RootElementHttpMessageConverter());
<ide> }
<del> messageConverters.add(new MappingJackson2XmlHttpMessageConverter(builder.build()));
<del> }
<del> else if (jaxb2Present) {
<del> messageConverters.add(new Jaxb2RootElementHttpMessageConverter());
<ide> }
<ide>
<ide> if (jackson2Present) {
<ide><path>spring-webmvc/src/main/java/org/springframework/web/servlet/function/support/RouterFunctionMapping.java
<ide> import org.springframework.beans.factory.BeanFactoryUtils;
<ide> import org.springframework.beans.factory.InitializingBean;
<ide> import org.springframework.context.ApplicationContext;
<add>import org.springframework.core.SpringProperties;
<ide> import org.springframework.http.converter.ByteArrayHttpMessageConverter;
<ide> import org.springframework.http.converter.HttpMessageConverter;
<ide> import org.springframework.http.converter.StringHttpMessageConverter;
<ide> * {@linkplain org.springframework.core.annotation.Order order}.
<ide> *
<ide> * @author Arjen Poutsma
<add> * @author Sebastien Deleuze
<ide> * @since 5.2
<ide> */
<ide> public class RouterFunctionMapping extends AbstractHandlerMapping implements InitializingBean {
<ide>
<add> /**
<add> * Boolean flag controlled by a {@code spring.xml.ignore} system property that instructs Spring to
<add> * ignore XML, i.e. to not initialize the XML-related infrastructure.
<add> * <p>The default is "false".
<add> */
<add> private static final boolean shouldIgnoreXml = SpringProperties.getFlag("spring.xml.ignore");
<add>
<add>
<ide> @Nullable
<ide> private RouterFunction<?> routerFunction;
<ide>
<ide> private void initMessageConverters() {
<ide> messageConverters.add(new ByteArrayHttpMessageConverter());
<ide> messageConverters.add(new StringHttpMessageConverter());
<ide>
<del> try {
<del> messageConverters.add(new SourceHttpMessageConverter<>());
<del> }
<del> catch (Error err) {
<del> // Ignore when no TransformerFactory implementation is available
<add> if (!shouldIgnoreXml) {
<add> try {
<add> messageConverters.add(new SourceHttpMessageConverter<>());
<add> }
<add> catch (Error err) {
<add> // Ignore when no TransformerFactory implementation is available
<add> }
<ide> }
<ide> messageConverters.add(new AllEncompassingFormHttpMessageConverter());
<ide>
<ide><path>spring-webmvc/src/main/java/org/springframework/web/servlet/mvc/method/annotation/ExceptionHandlerExceptionResolver.java
<ide> import org.springframework.beans.factory.InitializingBean;
<ide> import org.springframework.context.ApplicationContext;
<ide> import org.springframework.context.ApplicationContextAware;
<add>import org.springframework.core.SpringProperties;
<ide> import org.springframework.http.HttpStatus;
<ide> import org.springframework.http.converter.ByteArrayHttpMessageConverter;
<ide> import org.springframework.http.converter.HttpMessageConverter;
<ide> *
<ide> * @author Rossen Stoyanchev
<ide> * @author Juergen Hoeller
<add> * @author Sebastien Deleuze
<ide> * @since 3.1
<ide> */
<ide> public class ExceptionHandlerExceptionResolver extends AbstractHandlerMethodExceptionResolver
<ide> implements ApplicationContextAware, InitializingBean {
<ide>
<add> /**
<add> * Boolean flag controlled by a {@code spring.xml.ignore} system property that instructs Spring to
<add> * ignore XML, i.e. to not initialize the XML-related infrastructure.
<add> * <p>The default is "false".
<add> */
<add> private static final boolean shouldIgnoreXml = SpringProperties.getFlag("spring.xml.ignore");
<add>
<add>
<ide> @Nullable
<ide> private List<HandlerMethodArgumentResolver> customArgumentResolvers;
<ide>
<ide> public ExceptionHandlerExceptionResolver() {
<ide> this.messageConverters = new ArrayList<>();
<ide> this.messageConverters.add(new ByteArrayHttpMessageConverter());
<ide> this.messageConverters.add(new StringHttpMessageConverter());
<del> try {
<del> this.messageConverters.add(new SourceHttpMessageConverter<>());
<del> }
<del> catch (Error err) {
<del> // Ignore when no TransformerFactory implementation is available
<add> if(!shouldIgnoreXml) {
<add> try {
<add> this.messageConverters.add(new SourceHttpMessageConverter<>());
<add> }
<add> catch (Error err) {
<add> // Ignore when no TransformerFactory implementation is available
<add> }
<ide> }
<ide> this.messageConverters.add(new AllEncompassingFormHttpMessageConverter());
<ide> }
<ide><path>spring-webmvc/src/main/java/org/springframework/web/servlet/mvc/method/annotation/RequestMappingHandlerAdapter.java
<ide> import org.springframework.core.MethodIntrospector;
<ide> import org.springframework.core.ParameterNameDiscoverer;
<ide> import org.springframework.core.ReactiveAdapterRegistry;
<add>import org.springframework.core.SpringProperties;
<ide> import org.springframework.core.annotation.AnnotatedElementUtils;
<ide> import org.springframework.core.log.LogFormatUtils;
<ide> import org.springframework.core.task.AsyncTaskExecutor;
<ide> *
<ide> * @author Rossen Stoyanchev
<ide> * @author Juergen Hoeller
<add> * @author Sebastien Deleuze
<ide> * @since 3.1
<ide> * @see HandlerMethodArgumentResolver
<ide> * @see HandlerMethodReturnValueHandler
<ide> */
<ide> public class RequestMappingHandlerAdapter extends AbstractHandlerMethodAdapter
<ide> implements BeanFactoryAware, InitializingBean {
<ide>
<add> /**
<add> * Boolean flag controlled by a {@code spring.xml.ignore} system property that instructs Spring to
<add> * ignore XML, i.e. to not initialize the XML-related infrastructure.
<add> * <p>The default is "false".
<add> */
<add> private static final boolean shouldIgnoreXml = SpringProperties.getFlag("spring.xml.ignore");
<add>
<ide> /**
<ide> * MethodFilter that matches {@link InitBinder @InitBinder} methods.
<ide> */
<ide> public RequestMappingHandlerAdapter() {
<ide> this.messageConverters = new ArrayList<>(4);
<ide> this.messageConverters.add(new ByteArrayHttpMessageConverter());
<ide> this.messageConverters.add(new StringHttpMessageConverter());
<del> try {
<del> this.messageConverters.add(new SourceHttpMessageConverter<>());
<del> }
<del> catch (Error err) {
<del> // Ignore when no TransformerFactory implementation is available
<add> if (!shouldIgnoreXml) {
<add> try {
<add> this.messageConverters.add(new SourceHttpMessageConverter<>());
<add> }
<add> catch (Error err) {
<add> // Ignore when no TransformerFactory implementation is available
<add> }
<ide> }
<ide> this.messageConverters.add(new AllEncompassingFormHttpMessageConverter());
<ide> } | 10 |
PHP | PHP | fix tests around paths for windows | 1b61a0c78787ec50d70cb96b514925c0cea4cc92 | <ide><path>tests/TestCase/Core/AppTest.php
<ide> public function testPathWithPlugins() {
<ide> Plugin::load('TestPlugin');
<ide>
<ide> $result = App::path('Controller', 'TestPlugin');
<del> $this->assertEquals($basepath . 'TestPlugin' . DS . 'Controller' . DS, $result[0]);
<add> $this->assertPathEquals($basepath . 'TestPlugin' . DS . 'Controller' . DS, $result[0]);
<ide> }
<ide>
<ide> /**
<ide> public function testPluginPath() {
<ide>
<ide> $path = App::pluginPath('TestPlugin');
<ide> $expected = TEST_APP . 'Plugin' . DS . 'TestPlugin' . DS;
<del> $this->assertEquals($expected, $path);
<add> $this->assertPathEquals($expected, $path);
<ide>
<ide> $path = App::pluginPath('TestPluginTwo');
<ide> $expected = TEST_APP . 'Plugin' . DS . 'TestPluginTwo' . DS;
<del> $this->assertEquals($expected, $path);
<add> $this->assertPathEquals($expected, $path);
<ide> }
<ide>
<ide> /**
<ide> public function testPluginPath() {
<ide> public function testThemePath() {
<ide> $path = App::themePath('test_theme');
<ide> $expected = TEST_APP . 'TestApp' . DS . 'Template' . DS . 'Themed' . DS . 'TestTheme' . DS;
<del> $this->assertEquals($expected, $path);
<add> $this->assertPathEquals($expected, $path);
<ide>
<ide> $path = App::themePath('TestTheme');
<ide> $expected = TEST_APP . 'TestApp' . DS . 'Template' . DS . 'Themed' . DS . 'TestTheme' . DS;
<del> $this->assertEquals($expected, $path);
<add> $this->assertPathEquals($expected, $path);
<ide> }
<ide>
<ide> }
<ide><path>tests/TestCase/Core/PluginTest.php
<ide> public function testLoadNotFound() {
<ide> public function testPath() {
<ide> Plugin::load(array('TestPlugin', 'TestPluginTwo'));
<ide> $expected = TEST_APP . 'Plugin' . DS . 'TestPlugin' . DS;
<del> $this->assertEquals(Plugin::path('TestPlugin'), $expected);
<add> $this->assertPathEquals(Plugin::path('TestPlugin'), $expected);
<ide>
<ide> $expected = TEST_APP . 'Plugin' . DS . 'TestPluginTwo' . DS;
<del> $this->assertEquals(Plugin::path('TestPluginTwo'), $expected);
<add> $this->assertPathEquals(Plugin::path('TestPluginTwo'), $expected);
<ide> }
<ide>
<ide> /** | 2 |
Go | Go | fix buildfile tests after rebase | 49044a96089487b9df075fa972e83e4c05c7fae8 | <ide><path>buildfile_test.go
<ide> func TestVolume(t *testing.T) {
<ide> pushingPool: make(map[string]struct{}),
<ide> }
<ide>
<del> buildfile := NewBuildFile(srv, ioutil.Discard)
<add> buildfile := NewBuildFile(srv, ioutil.Discard, false)
<ide> imgId, err := buildfile.Build(mkTestContext(`
<ide> from %s
<ide> VOLUME /test
<ide> CMD Hello world
<ide> if len(img.Config.Volumes) == 0 {
<ide> t.Fail()
<ide> }
<del> for key, _ := range img.Config.Volumes {
<add> for key := range img.Config.Volumes {
<ide> if key != "/test" {
<ide> t.Fail()
<ide> } | 1 |
Javascript | Javascript | change var to let | 00000000fb5093730c82caf33737385029db7489 | <ide><path>test/sequential/test-repl-timeout-throw.js
<ide> child.stdin.write = function(original) {
<ide> }(child.stdin.write);
<ide>
<ide> child.stdout.once('data', function() {
<del> child.stdin.write('var throws = 0;');
<add> child.stdin.write('let throws = 0;');
<ide> child.stdin.write('process.on("exit",function(){console.log(throws)});');
<ide> child.stdin.write('function thrower(){console.log("THROW",throws++);XXX};');
<ide> child.stdin.write('setTimeout(thrower);""\n');
<ide> child.stdout.once('data', function() {
<ide> function eeTest() {
<ide> child.stdin.write('setTimeout(function() {\n' +
<ide> ' const events = require("events");\n' +
<del> ' var e = new events.EventEmitter;\n' +
<add> ' let e = new events.EventEmitter;\n' +
<ide> ' process.nextTick(function() {\n' +
<ide> ' e.on("x", thrower);\n' +
<ide> ' setTimeout(function() {\n' + | 1 |
Ruby | Ruby | add nil check in asset_path | 42a1b0c7c31400c762e2f8d1aff16338bbe8d63d | <ide><path>actionview/lib/action_view/helpers/asset_url_helper.rb
<ide> module AssetUrlHelper
<ide> # asset_path "application", type: :stylesheet # => /assets/application.css
<ide> # asset_path "http://www.example.com/js/xmlhr.js" # => http://www.example.com/js/xmlhr.js
<ide> def asset_path(source, options = {})
<add> raise ArgumentError, "Cannot pass nil as asset source" if source.nil?
<add>
<ide> source = source.to_s
<ide> return "" unless source.present?
<ide> return source if source =~ URI_REGEXP
<ide><path>actionview/test/template/asset_tag_helper_test.rb
<ide> def test_asset_path_tag
<ide> AssetPathToTag.each { |method, tag| assert_dom_equal(tag, eval(method)) }
<ide> end
<ide>
<add> def test_asset_path_tag_raises_an_error_for_nil_source
<add> exception = assert_raise(ArgumentError) { asset_path(nil) }
<add> assert_equal("Cannot pass nil as asset source", exception.message)
<add> end
<add>
<ide> def test_asset_path_tag_to_not_create_duplicate_slashes
<ide> @controller.config.asset_host = "host/"
<ide> assert_dom_equal('http://host/foo', asset_path("foo")) | 2 |
Javascript | Javascript | remove invalid @supports condition | b932eaf6a79d8fc3dd28bcd8e5e509a7fe06a4ac | <ide><path>web/pdf_print_service.js
<ide> PDFPrintService.prototype = {
<ide> this.pageStyleSheet = document.createElement("style");
<ide> const pageSize = this.pagesOverview[0];
<ide> this.pageStyleSheet.textContent =
<del> // "size:<width> <height>" is what we need. But also add "A4" because
<del> // Firefox incorrectly reports support for the other value.
<del> "@supports ((size:A4) and (size:1pt 1pt)) {" +
<del> "@page { size: " +
<del> pageSize.width +
<del> "pt " +
<del> pageSize.height +
<del> "pt;}" +
<del> "}";
<add> "@page { size: " + pageSize.width + "pt " + pageSize.height + "pt;}";
<ide> body.appendChild(this.pageStyleSheet);
<ide> },
<ide> | 1 |
Text | Text | update 6.x to 8.x in backporting wiki | ab35194cb0297a85de5384b529f2cc03293d93bb | <ide><path>doc/guides/backporting-to-release-lines.md
<ide> commits be cherry-picked or backported.
<ide>
<ide> ## How to submit a backport pull request
<ide>
<del>For the following steps, let's assume that a backport is needed for the v6.x
<del>release line. All commands will use the `v6.x-staging` branch as the target
<add>For the following steps, let's assume that a backport is needed for the v8.x
<add>release line. All commands will use the `v8.x-staging` branch as the target
<ide> branch. In order to submit a backport pull request to another branch, simply
<ide> replace that with the staging branch for the targeted release line.
<ide>
<ide> replace that with the staging branch for the targeted release line.
<ide> # the origin remote points to your fork, and the upstream remote points
<ide> # to git://github.com/nodejs/node
<ide> cd $NODE_DIR
<del># If v6.x-staging is checked out `pull` should be used instead of `fetch`
<del>git fetch upstream v6.x-staging:v6.x-staging -f
<add># If v8.x-staging is checked out `pull` should be used instead of `fetch`
<add>git fetch upstream v8.x-staging:v8.x-staging -f
<ide> # Assume we want to backport PR #10157
<del>git checkout -b backport-10157-to-v6.x v6.x-staging
<add>git checkout -b backport-10157-to-v8.x v8.x-staging
<ide> # Ensure there are no test artifacts from previous builds
<ide> # Note that this command deletes all files and directories
<ide> # not under revision control below the ./test directory.
<ide> hint: and commit the result with 'git commit'
<ide> 7. Make sure `make -j4 test` passes.
<ide> 8. Push the changes to your fork
<ide> 9. Open a pull request:
<del> 1. Be sure to target the `v6.x-staging` branch in the pull request.
<add> 1. Be sure to target the `v8.x-staging` branch in the pull request.
<ide> 2. Include the backport target in the pull request title in the following
<del> format — `[v6.x backport] <commit title>`.
<del> Example: `[v6.x backport] process: improve performance of nextTick`
<add> format — `[v8.x backport] <commit title>`.
<add> Example: `[v8.x backport] process: improve performance of nextTick`
<ide> 3. Check the checkbox labeled "Allow edits from maintainers".
<ide> 4. In the description add a reference to the original PR
<ide> 5. Run a [`node-test-pull-request`][] CI job (with `REBASE_ONTO` set to the
<ide> default `<pr base branch>`)
<ide> 10. If during the review process conflicts arise, use the following to rebase:
<del> `git pull --rebase upstream v6.x-staging`
<add> `git pull --rebase upstream v8.x-staging`
<ide>
<del>After the PR lands replace the `backport-requested-v6.x` label on the original
<del>PR with `backported-to-v6.x`.
<add>After the PR lands replace the `backport-requested-v8.x` label on the original
<add>PR with `backported-to-v8.x`.
<ide>
<ide> [Release Schedule]: https://github.com/nodejs/Release#release-schedule1
<ide> [Release Plan]: https://github.com/nodejs/Release#release-plan | 1 |
PHP | PHP | update type_template constant value | 9fab5a3c85188a626c9bf557c4a9d93ea756d79e | <ide><path>src/View/View.php
<ide> class View implements EventDispatcherInterface
<ide> *
<ide> * @var string
<ide> */
<del> public const TYPE_TEMPLATE = 'view';
<add> public const TYPE_TEMPLATE = 'template';
<ide>
<ide> /**
<ide> * Constant for view file type 'element' | 1 |
Python | Python | add iops in the diskio plugin (issue #763) | 16a62781e4e19ef0e3a3efa2c5dc44a673cf4fa2 | <ide><path>glances/main.py
<ide> def init_args(self):
<ide> dest='byte', help='display network rate in byte per second')
<ide> parser.add_argument('--diskio-show-ramfs', action='store_true', default=False,
<ide> dest='diskio_show_ramfs', help='show RAM Fs in the DiskIO plugin')
<add> parser.add_argument('--diskio-iops', action='store_true', default=False,
<add> dest='diskio_iops', help='show IO per second in the DiskIO plugin')
<ide> parser.add_argument('--fahrenheit', action='store_true', default=False,
<ide> dest='fahrenheit', help='display temperature in Fahrenheit (default is Celsius)')
<ide> parser.add_argument('-1', '--percpu', action='store_true', default=False,
<ide><path>glances/outputs/glances_curses.py
<ide> def __catch_key(self, return_to_browser=False):
<ide> glances_processes.sort_key = 'cpu_percent'
<ide> elif self.pressedkey == ord('b'):
<ide> # 'b' > Switch between bit/s and Byte/s for network IO
<del> # self.net_byteps_tag = not self.net_byteps_tag
<ide> self.args.byte = not self.args.byte
<add> elif self.pressedkey == ord('B'):
<add> # 'B' > Switch between bit/s and IO/s for Disk IO
<add> self.args.diskio_iops = not self.args.diskio_iops
<ide> elif self.pressedkey == ord('c'):
<ide> # 'c' > Sort processes by CPU usage
<ide> glances_processes.auto_sort = False
<ide><path>glances/plugins/glances_diskio.py
<ide> def update(self):
<ide> if self.is_hide(disk):
<ide> continue
<ide>
<del> # Compute bitrate
<add> # Compute count and bit rate
<ide> try:
<add> read_count = (diskio_new[disk].read_count -
<add> self.diskio_old[disk].read_count)
<add> write_count = (diskio_new[disk].write_count -
<add> self.diskio_old[disk].write_count)
<ide> read_bytes = (diskio_new[disk].read_bytes -
<ide> self.diskio_old[disk].read_bytes)
<ide> write_bytes = (diskio_new[disk].write_bytes -
<ide> self.diskio_old[disk].write_bytes)
<ide> diskstat = {
<ide> 'time_since_update': time_since_update,
<ide> 'disk_name': disk,
<add> 'read_count': read_count,
<add> 'write_count': write_count,
<ide> 'read_bytes': read_bytes,
<ide> 'write_bytes': write_bytes}
<ide> except KeyError:
<ide> def msg_curse(self, args=None):
<ide> # Header
<ide> msg = '{0:9}'.format('DISK I/O')
<ide> ret.append(self.curse_add_line(msg, "TITLE"))
<del> msg = '{0:>7}'.format('R/s')
<del> ret.append(self.curse_add_line(msg))
<del> msg = '{0:>7}'.format('W/s')
<del> ret.append(self.curse_add_line(msg))
<add> if args.diskio_iops:
<add> msg = '{0:>7}'.format('IOR/s')
<add> ret.append(self.curse_add_line(msg))
<add> msg = '{0:>7}'.format('IOW/s')
<add> ret.append(self.curse_add_line(msg))
<add> else:
<add> msg = '{0:>7}'.format('R/s')
<add> ret.append(self.curse_add_line(msg))
<add> msg = '{0:>7}'.format('W/s')
<add> ret.append(self.curse_add_line(msg))
<ide> # Disk list (sorted by name)
<ide> for i in sorted(self.stats, key=operator.itemgetter(self.get_key())):
<ide> # Is there an alias for the disk name ?
<ide> def msg_curse(self, args=None):
<ide> disk_name = '_' + disk_name[-8:]
<ide> msg = '{0:9}'.format(disk_name)
<ide> ret.append(self.curse_add_line(msg))
<del> txps = self.auto_unit(
<del> int(i['read_bytes'] // i['time_since_update']))
<del> rxps = self.auto_unit(
<del> int(i['write_bytes'] // i['time_since_update']))
<del> msg = '{0:>7}'.format(txps)
<del> ret.append(self.curse_add_line(msg,
<del> self.get_views(item=i[self.get_key()],
<del> key='read_bytes',
<del> option='decoration')))
<del> msg = '{0:>7}'.format(rxps)
<del> ret.append(self.curse_add_line(msg,
<del> self.get_views(item=i[self.get_key()],
<del> key='write_bytes',
<del> option='decoration')))
<add> if args.diskio_iops:
<add> # count
<add> txps = self.auto_unit(
<add> int(i['read_count'] // i['time_since_update']))
<add> rxps = self.auto_unit(
<add> int(i['write_count'] // i['time_since_update']))
<add> msg = '{0:>7}'.format(txps)
<add> ret.append(self.curse_add_line(msg,
<add> self.get_views(item=i[self.get_key()],
<add> key='read_count',
<add> option='decoration')))
<add> msg = '{0:>7}'.format(rxps)
<add> ret.append(self.curse_add_line(msg,
<add> self.get_views(item=i[self.get_key()],
<add> key='write_count',
<add> option='decoration')))
<add> else:
<add> # Bitrate
<add> txps = self.auto_unit(
<add> int(i['read_bytes'] // i['time_since_update']))
<add> rxps = self.auto_unit(
<add> int(i['write_bytes'] // i['time_since_update']))
<add> msg = '{0:>7}'.format(txps)
<add> ret.append(self.curse_add_line(msg,
<add> self.get_views(item=i[self.get_key()],
<add> key='read_bytes',
<add> option='decoration')))
<add> msg = '{0:>7}'.format(rxps)
<add> ret.append(self.curse_add_line(msg,
<add> self.get_views(item=i[self.get_key()],
<add> key='write_bytes',
<add> option='decoration')))
<ide>
<ide> return ret
<ide><path>glances/plugins/glances_help.py
<ide> def generate_view_data(self):
<ide> self.view_data['enable_disable_docker'] = msg_col2.format('D', 'Enable/disable Docker stats')
<ide> self.view_data['enable_disable_quick_look'] = msg_col.format('3', 'Enable/disable quick look plugin')
<ide> self.view_data['show_hide_ip'] = msg_col2.format('I', 'Show/hide IP module')
<add> self.view_data['diskio_iops'] = msg_col2.format('B', 'Count/rate for Disk I/O')
<ide> self.view_data['edit_pattern_filter'] = 'ENTER: Edit the process filter pattern'
<ide>
<ide> def get_view_data(self, args=None):
<ide> def msg_curse(self, args=None):
<ide> ret.append(self.curse_add_line(self.view_data['show_hide_help']))
<ide> ret.append(self.curse_new_line())
<ide> ret.append(self.curse_add_line(self.view_data['enable_disable_quick_look']))
<del> ret.append(self.curse_add_line(self.view_data['quit']))
<add> ret.append(self.curse_add_line(self.view_data['diskio_iops']))
<ide> ret.append(self.curse_new_line())
<ide> ret.append(self.curse_add_line(self.view_data['enable_disable_top_extends_stats']))
<ide> ret.append(self.curse_new_line())
<ide> ret.append(self.curse_add_line(self.view_data['enable_disable_short_processname']))
<ide> ret.append(self.curse_new_line())
<ide> ret.append(self.curse_add_line(self.view_data['enable_disable_irix']))
<add> ret.append(self.curse_add_line(self.view_data['quit']))
<ide> ret.append(self.curse_new_line())
<ide>
<ide> ret.append(self.curse_new_line())
<ide><path>glances/plugins/glances_processlist.py
<ide> def __msg_curse_header(self, ret, process_sort_key, args=None):
<ide> ret.append(self.curse_add_line(msg))
<ide> msg = '{0:>10}'.format('TIME+')
<ide> ret.append(self.curse_add_line(msg, sort_style if process_sort_key == 'cpu_times' else 'DEFAULT', optional=True))
<del> msg = '{0:>6}'.format('IOR/s')
<add> msg = '{0:>6}'.format('R/s')
<ide> ret.append(self.curse_add_line(msg, sort_style if process_sort_key == 'io_counters' else 'DEFAULT', optional=True, additional=True))
<del> msg = '{0:>6}'.format('IOW/s')
<add> msg = '{0:>6}'.format('W/s')
<ide> ret.append(self.curse_add_line(msg, sort_style if process_sort_key == 'io_counters' else 'DEFAULT', optional=True, additional=True))
<ide> msg = ' {0:8}'.format('Command')
<ide> ret.append(self.curse_add_line(msg, sort_style if process_sort_key == 'name' else 'DEFAULT')) | 5 |
Javascript | Javascript | drop tests for removed isvirtual behaviors | a1bff0683604ee321f5acc10ea03d9d05ded0645 | <ide><path>packages/ember-views/tests/views/view/virtual_views_test.js
<del>import { get } from "ember-metal/property_get";
<del>import run from "ember-metal/run_loop";
<del>import jQuery from "ember-views/system/jquery";
<del>import EmberView from "ember-views/views/view";
<del>
<del>var rootView, childView;
<del>
<del>QUnit.module("virtual views", {
<del> teardown() {
<del> run(function() {
<del> rootView.destroy();
<del> childView.destroy();
<del> });
<del> }
<del>});
<del>
<del>QUnit.skip("a virtual view does not appear as a view's parentView", function() {
<del> rootView = EmberView.create({
<del> elementId: 'root-view',
<del>
<del> render(buffer) {
<del> buffer.push("<h1>Hi</h1>");
<del> this.appendChild(virtualView);
<del> }
<del> });
<del>
<del> var virtualView = EmberView.create({
<del> isVirtual: true,
<del> tagName: '',
<del>
<del> render(buffer) {
<del> buffer.push("<h2>Virtual</h2>");
<del> this.appendChild(childView);
<del> }
<del> });
<del>
<del> childView = EmberView.create({
<del> render(buffer) {
<del> buffer.push("<p>Bye!</p>");
<del> }
<del> });
<del>
<del> run(function() {
<del> jQuery("#qunit-fixture").empty();
<del> rootView.appendTo("#qunit-fixture");
<del> });
<del>
<del> equal(jQuery("#root-view > h2").length, 1, "nodes with '' tagName do not create wrappers");
<del> equal(get(childView, 'parentView'), rootView);
<del>
<del> var children = get(rootView, 'childViews');
<del>
<del> equal(get(children, 'length'), 1, "there is one child element");
<del> equal(children.objectAt(0), childView, "the child element skips through the virtual view");
<del>});
<del>
<del>QUnit.skip("when a virtual view's child views change, the parent's childViews should reflect", function() {
<del> rootView = EmberView.create({
<del> elementId: 'root-view',
<del>
<del> render(buffer) {
<del> buffer.push("<h1>Hi</h1>");
<del> this.appendChild(virtualView);
<del> }
<del> });
<del>
<del> var virtualView = EmberView.create({
<del> isVirtual: true,
<del> tagName: '',
<del>
<del> render(buffer) {
<del> buffer.push("<h2>Virtual</h2>");
<del> this.appendChild(childView);
<del> }
<del> });
<del>
<del> childView = EmberView.create({
<del> render(buffer) {
<del> buffer.push("<p>Bye!</p>");
<del> }
<del> });
<del>
<del> run(function() {
<del> jQuery("#qunit-fixture").empty();
<del> rootView.appendTo("#qunit-fixture");
<del> });
<del>
<del> equal(virtualView.get('childViews.length'), 1, "has childView - precond");
<del> equal(rootView.get('childViews.length'), 1, "has childView - precond");
<del>
<del> run(function() {
<del> childView.removeFromParent();
<del> });
<del>
<del> equal(virtualView.get('childViews.length'), 0, "has no childView");
<del> equal(rootView.get('childViews.length'), 0, "has no childView");
<del>}); | 1 |
Ruby | Ruby | restore recursive tap search | af272e04c7167d348d2363a5e02bba4029b903c7 | <ide><path>Library/Homebrew/formulary.rb
<ide> class TapLoader < FormulaLoader
<ide> def initialize tapped_name
<ide> @tapped_name = tapped_name
<ide> user, repo, name = tapped_name.split("/", 3).map(&:downcase)
<del> path = Pathname.new("#{HOMEBREW_LIBRARY}/Taps/#{user}-#{repo}/#{name}.rb")
<add> tap = Pathname.new("#{HOMEBREW_LIBRARY}/Taps/#{user}-#{repo}")
<add> path = tap.join("#{name}.rb")
<add>
<add> if tap.directory?
<add> tap.find_formula do |child|
<add> if child.basename(".rb").to_s == name
<add> path = tap.join(child)
<add> end
<add> end
<add> end
<add>
<ide> super name, path
<ide> end
<ide> | 1 |
Javascript | Javascript | update jsm renderers | 5da0acd70dd7060a68099b77b54b5c63fd8f4d71 | <ide><path>examples/jsm/renderers/CSS2DRenderer.js
<ide> import {
<ide> Matrix4,
<ide> Object3D,
<del> REVISION,
<ide> Vector3
<ide> } from "../../../build/three.module.js";
<ide>
<ide> CSS2DObject.prototype.constructor = CSS2DObject;
<ide>
<ide> var CSS2DRenderer = function () {
<ide>
<del> console.log( 'THREE.CSS2DRenderer', REVISION );
<del>
<ide> var _width, _height;
<ide> var _widthHalf, _heightHalf;
<ide>
<ide><path>examples/jsm/renderers/CSS3DRenderer.js
<ide> import {
<ide> Matrix4,
<ide> Object3D,
<del> REVISION,
<ide> Vector3
<ide> } from "../../../build/three.module.js";
<ide>
<ide> CSS3DSprite.prototype.constructor = CSS3DSprite;
<ide>
<ide> var CSS3DRenderer = function () {
<ide>
<del> console.log( 'THREE.CSS3DRenderer', REVISION );
<del>
<ide> var _width, _height;
<ide> var _widthHalf, _heightHalf;
<ide>
<ide><path>examples/jsm/renderers/RaytracingRenderer.js
<ide>
<ide> import {
<ide> Color,
<del> EventDispatcher,
<del> REVISION
<add> EventDispatcher
<ide> } from "../../../build/three.module.js";
<ide>
<ide> var RaytracingRenderer = function ( parameters ) {
<ide>
<del> console.log( 'THREE.RaytracingRenderer', REVISION );
<del>
<ide> parameters = parameters || {};
<ide>
<ide> var scope = this;
<ide><path>examples/jsm/renderers/SVGRenderer.js
<ide> import {
<ide> Matrix3,
<ide> Matrix4,
<ide> Object3D,
<del> REVISION,
<ide> Vector3,
<ide> VertexColors
<ide> } from "../../../build/three.module.js";
<ide> SVGObject.prototype.constructor = SVGObject;
<ide>
<ide> var SVGRenderer = function () {
<ide>
<del> console.log( 'THREE.SVGRenderer', REVISION );
<del>
<ide> var _this = this,
<ide> _renderData, _elements, _lights,
<ide> _projector = new Projector(),
<ide><path>examples/jsm/renderers/SoftwareRenderer.js
<ide> import {
<ide> MeshBasicMaterial,
<ide> MeshLambertMaterial,
<ide> MeshPhongMaterial,
<del> REVISION,
<ide> SpriteMaterial,
<ide> Vector2,
<ide> Vector3,
<ide> import { RenderableSprite } from "../renderers/Projector.js";
<ide>
<ide> var SoftwareRenderer = function ( parameters ) {
<ide>
<del> console.log( 'THREE.SoftwareRenderer', REVISION );
<del>
<ide> parameters = parameters || {};
<ide>
<ide> var canvas = parameters.canvas !== undefined | 5 |
Text | Text | fix incorrect module name [ci skip] | 0ae20627d5c65b259354b587cdcd1b67d64092ce | <ide><path>guides/source/api_app.md
<ide> Some common modules you might want to add:
<ide> and translation methods.
<ide> - `ActionController::HttpAuthentication::Basic` (or `Digest` or `Token`): Support
<ide> for basic, digest or token HTTP authentication.
<del>- `AbstractController::Layouts`: Support for layouts when rendering.
<add>- `ActionView::Layouts`: Support for layouts when rendering.
<ide> - `ActionController::MimeResponds`: Support for `respond_to`.
<ide> - `ActionController::Cookies`: Support for `cookies`, which includes
<ide> support for signed and encrypted cookies. This requires the cookies middleware. | 1 |
Javascript | Javascript | make process.mixin copy over undefined values | 876b6d21832ece6c51021fe0acc1d31c2ad4e32a | <ide><path>src/node.js
<ide> process.mixin = function() {
<ide> , copy );
<ide>
<ide> // Don't bring in undefined values
<del> else if ( copy !== undefined )
<add> else
<ide> target[ name ] = copy;
<ide>
<ide> }
<ide><path>test/mjsunit/test-process-mixin.js
<ide> var fakeDomElement = {deep: {nodeType: 4}};
<ide> target = {};
<ide> process.mixin(true, target, fakeDomElement);
<ide>
<del>assert.notStrictEqual(target.deep, fakeDomElement.deep);
<ide>\ No newline at end of file
<add>assert.notStrictEqual(target.deep, fakeDomElement.deep);
<add>
<add>var objectWithUndefinedValue = {foo: undefined};
<add>target = {};
<add>
<add>process.mixin(target, objectWithUndefinedValue);
<add>assert.ok(target.hasOwnProperty('foo'));
<ide>\ No newline at end of file | 2 |
Go | Go | fix race condition between list and remove volume | 800b9c5a2698aae5c43f42d4c9c1a41280b556a6 | <ide><path>volume/store/store.go
<ide> func (s *VolumeStore) List() ([]volume.Volume, []string, error) {
<ide>
<ide> s.locks.Lock(name)
<ide> storedV, exists := s.getNamed(name)
<del> if !exists {
<del> s.setNamed(v, "")
<del> }
<add> // Note: it's not safe to populate the cache here because the volume may have been
<add> // deleted before we acquire a lock on its name
<ide> if exists && storedV.DriverName() != v.DriverName() {
<ide> logrus.Warnf("Volume name %s already exists for driver %s, not including volume returned by %s", v.Name(), storedV.DriverName(), v.DriverName())
<ide> s.locks.Unlock(v.Name())
<ide><path>volume/testutils/testutils.go
<ide> func (NoopVolume) Unmount() error { return nil }
<ide>
<ide> // FakeVolume is a fake volume with a random name
<ide> type FakeVolume struct {
<del> name string
<add> name string
<add> driverName string
<ide> }
<ide>
<ide> // NewFakeVolume creates a new fake volume for testing
<del>func NewFakeVolume(name string) volume.Volume {
<del> return FakeVolume{name: name}
<add>func NewFakeVolume(name string, driverName string) volume.Volume {
<add> return FakeVolume{name: name, driverName: driverName}
<ide> }
<ide>
<ide> // Name is the name of the volume
<ide> func (f FakeVolume) Name() string { return f.name }
<ide>
<ide> // DriverName is the name of the driver
<del>func (FakeVolume) DriverName() string { return "fake" }
<add>func (f FakeVolume) DriverName() string { return f.driverName }
<ide>
<ide> // Path is the filesystem path to the volume
<ide> func (FakeVolume) Path() string { return "fake" }
<ide> func (d *FakeDriver) Create(name string, opts map[string]string) (volume.Volume,
<ide> if opts != nil && opts["error"] != "" {
<ide> return nil, fmt.Errorf(opts["error"])
<ide> }
<del> v := NewFakeVolume(name)
<add> v := NewFakeVolume(name, d.name)
<ide> d.vols[name] = v
<ide> return v, nil
<ide> } | 2 |
PHP | PHP | fix doc block | 8f4592920dba0b6929f2034e836cee678ef63441 | <ide><path>src/Database/Type/BinaryUuidType.php
<ide> public function newId()
<ide> *
<ide> * @param null|string|resource $value The value to convert.
<ide> * @param \Cake\Database\Driver $driver The driver instance to convert with.
<del> * @return resource|null
<add> * @return resource|string|null
<ide> * @throws \Cake\Core\Exception\Exception
<ide> */
<ide> public function toPHP($value, Driver $driver) | 1 |
Text | Text | fix typo in /api/dns.md | e511c0b9ed1c49a7021748f3eec25e6bece1fd17 | <ide><path>doc/api/dns.md
<ide> added: v15.0.0
<ide> Uses the DNS protocol to resolve `CAA` records for the `hostname`. The
<ide> `addresses` argument passed to the `callback` function
<ide> will contain an array of certification authority authorization records
<del>available for the `hostname` (e.g. `[{critial: 0, iodef:
<add>available for the `hostname` (e.g. `[{critical: 0, iodef:
<ide> 'mailto:pki@example.com'}, {critical: 128, issue: 'pki.example.com'}]`).
<ide>
<ide> ## `dns.resolveMx(hostname, callback)`
<ide> added: v15.0.0
<ide> Uses the DNS protocol to resolve `CAA` records for the `hostname`. On success,
<ide> the `Promise` is resolved with an array of objects containing available
<ide> certification authority authorization records available for the `hostname`
<del>(e.g. `[{critial: 0, iodef: 'mailto:pki@example.com'},{critical: 128, issue:
<add>(e.g. `[{critical: 0, iodef: 'mailto:pki@example.com'},{critical: 128, issue:
<ide> 'pki.example.com'}]`).
<ide>
<ide> ### `dnsPromises.resolveCname(hostname)` | 1 |
Ruby | Ruby | convert pre/postflightblock test to spec | 8155d27e5c11f9d0adfbb6c911d1c5d6b479aa53 | <add><path>Library/Homebrew/cask/spec/cask/artifact/postflight_block_spec.rb
<del><path>Library/Homebrew/cask/test/cask/artifact/postflight_block_test.rb
<del>require "test_helper"
<add>require "spec_helper"
<ide>
<ide> describe Hbc::Artifact::PostflightBlock do
<ide> describe "install_phase" do
<ide> it "calls the specified block after installing, passing a Cask mini-dsl" do
<del> called = false
<add> called = false
<ide> yielded_arg = nil
<ide>
<ide> cask = Hbc::Cask.new("with-postflight") do
<ide> end
<ide> end
<ide>
<del> Hbc::Artifact::PostflightBlock.new(cask).install_phase
<add> described_class.new(cask).install_phase
<ide>
<del> called.must_equal true
<del> yielded_arg.must_be_kind_of Hbc::DSL::Postflight
<add> expect(called).to be true
<add> expect(yielded_arg).to be_kind_of(Hbc::DSL::Postflight)
<ide> end
<ide> end
<ide>
<ide> describe "uninstall_phase" do
<ide> it "calls the specified block after uninstalling, passing a Cask mini-dsl" do
<del> called = false
<add> called = false
<ide> yielded_arg = nil
<ide>
<ide> cask = Hbc::Cask.new("with-uninstall-postflight") do
<ide> end
<ide> end
<ide>
<del> Hbc::Artifact::PostflightBlock.new(cask).uninstall_phase
<add> described_class.new(cask).uninstall_phase
<ide>
<del> called.must_equal true
<del> yielded_arg.must_be_kind_of Hbc::DSL::UninstallPostflight
<add> expect(called).to be true
<add> expect(yielded_arg).to be_kind_of(Hbc::DSL::UninstallPostflight)
<ide> end
<ide> end
<ide> end
<add><path>Library/Homebrew/cask/spec/cask/artifact/preflight_block_spec.rb
<del><path>Library/Homebrew/cask/test/cask/artifact/preflight_block_test.rb
<del>require "test_helper"
<add>require "spec_helper"
<ide>
<ide> describe Hbc::Artifact::PreflightBlock do
<ide> describe "install_phase" do
<ide> it "calls the specified block before installing, passing a Cask mini-dsl" do
<del> called = false
<add> called = false
<ide> yielded_arg = nil
<ide>
<ide> cask = Hbc::Cask.new("with-preflight") do
<ide> end
<ide> end
<ide>
<del> Hbc::Artifact::PreflightBlock.new(cask).install_phase
<add> described_class.new(cask).install_phase
<ide>
<del> called.must_equal true
<del> yielded_arg.must_be_kind_of Hbc::DSL::Preflight
<add> expect(called).to be true
<add> expect(yielded_arg).to be_kind_of Hbc::DSL::Preflight
<ide> end
<ide> end
<ide>
<ide> describe "uninstall_phase" do
<ide> it "calls the specified block before uninstalling, passing a Cask mini-dsl" do
<del> called = false
<add> called = false
<ide> yielded_arg = nil
<ide>
<ide> cask = Hbc::Cask.new("with-uninstall-preflight") do
<ide> end
<ide> end
<ide>
<del> Hbc::Artifact::PreflightBlock.new(cask).uninstall_phase
<add> described_class.new(cask).uninstall_phase
<ide>
<del> called.must_equal true
<del> yielded_arg.must_be_kind_of Hbc::DSL::UninstallPreflight
<add> expect(called).to be true
<add> expect(yielded_arg).to be_kind_of Hbc::DSL::UninstallPreflight
<ide> end
<ide> end
<ide> end | 2 |
Mixed | Ruby | pass column to quote when copying a sqlite table | d3e5118e7d42a9425b843190380d12ed3ce1e5f9 | <ide><path>activerecord/CHANGELOG.md
<ide> ## Rails 4.0.0 (unreleased) ##
<ide>
<add>* Fix quoting for sqlite migrations using copy_table_contents() with binary
<add> columns.
<add>
<add> These would fail with "SQLite3::SQLException: unrecognized token" because
<add> the column was not being passed to quote() so the data was not quoted
<add> correctly.
<add>
<add> *Matthew M. Boedicker*
<add>
<ide> * Promotes `change_column_null` to the migrations API. This macro sets/removes
<ide> `NOT NULL` constraints, and accepts an optional argument to replace existing
<ide> `NULL`s if needed. The adapters for SQLite, MySQL, PostgreSQL, and (at least)
<ide><path>activerecord/lib/active_record/connection_adapters/sqlite3_adapter.rb
<ide> def copy_table_contents(from, to, columns, rename = {}) #:nodoc:
<ide> quoted_columns = columns.map { |col| quote_column_name(col) } * ','
<ide>
<ide> quoted_to = quote_table_name(to)
<add>
<add> raw_column_mappings = Hash[columns(from).map { |c| [c.name, c] }]
<add>
<ide> exec_query("SELECT * FROM #{quote_table_name(from)}").each do |row|
<ide> sql = "INSERT INTO #{quoted_to} (#{quoted_columns}) VALUES ("
<del> sql << columns.map {|col| quote row[column_mappings[col]]} * ', '
<add>
<add> column_values = columns.map do |col|
<add> quote(row[column_mappings[col]], raw_column_mappings[col])
<add> end
<add>
<add> sql << column_values * ', '
<ide> sql << ')'
<ide> exec_query sql
<ide> end
<ide><path>activerecord/test/cases/adapters/sqlite3/copy_table_test.rb
<ide> require "cases/helper"
<ide>
<ide> class CopyTableTest < ActiveRecord::TestCase
<del> fixtures :customers, :companies, :comments
<add> fixtures :customers, :companies, :comments, :binaries
<ide>
<ide> def setup
<ide> @connection = ActiveRecord::Base.connection
<ide> def test_copy_table_with_unconventional_primary_key
<ide> end
<ide> end
<ide>
<add> def test_copy_table_with_binary_column
<add> test_copy_table 'binaries', 'binaries2'
<add> end
<add>
<ide> protected
<ide> def copy_table(from, to, options = {})
<ide> @connection.copy_table(from, to, {:temporary => true}.merge(options)) | 3 |
Javascript | Javascript | add basic popover with additional information | e421df81512839651092f107d00fc190b09e3d7a | <ide><path>fixtures/attribute-behavior/src/App.js
<ide> function getRenderedAttributeValue(renderer, attribute, type) {
<ide> container = document.createElement(containerTagName);
<ide> }
<ide>
<add> let testValue;
<ide> let defaultValue;
<ide> try {
<ide> const read = attribute.read || getProperty(attribute.name);
<ide>
<del> let testValue = type.testValue;
<add> testValue = type.testValue;
<ide> if (attribute.overrideStringValue !== undefined) {
<ide> switch (type.name) {
<ide> case 'string':
<ide> function getRenderedAttributeValue(renderer, attribute, type) {
<ide> const result = read(container.firstChild);
<ide>
<ide> return {
<add> testValue,
<add> defaultValue,
<ide> defaultValue,
<ide> result,
<ide> didWarn: _didWarn,
<ide> didError: false,
<ide> };
<ide> } catch (error) {
<ide> return {
<add> testValue,
<add> defaultValue,
<ide> defaultValue,
<ide> result: null,
<ide> didWarn: _didWarn,
<ide> function RendererResult({version, result, defaultValue, didWarn, didError}) {
<ide> return <div css={style}>{displayResult}</div>;
<ide> }
<ide>
<del>function Result(props) {
<del> const {react15, react16, hasSameBehavior} = props;
<del> const style = {position: 'absolute', width: '100%', height: '100%'};
<del> if (!hasSameBehavior) {
<del> style.border = '4px solid purple';
<del> }
<add>function ResultPopover(props) {
<ide> return (
<del> <div css={style}>
<del> <div css={{position: 'absolute', width: '50%', height: '100%'}}>
<del> <RendererResult version={15} {...react15} />
<del> </div>
<add> <pre
<add> css={{
<add> padding: '1em',
<add> width: '25em',
<add> }}>
<add> {JSON.stringify(
<add> {
<add> react15: props.react15,
<add> react16: props.react16,
<add> hasSameBehavior: props.hasSameBehavior,
<add> },
<add> null,
<add> 2,
<add> )}
<add> </pre>
<add> );
<add>}
<add>
<add>class Result extends React.Component {
<add> state = {showInfo: false};
<add> onMouseEnter = () => {
<add> if (this.timeout) {
<add> clearTimeout(this.timeout);
<add> }
<add> this.timeout = setTimeout(() => {
<add> this.setState({showInfo: true});
<add> }, 250);
<add> };
<add> onMouseLeave = () => {
<add> if (this.timeout) {
<add> clearTimeout(this.timeout);
<add> }
<add> this.setState({showInfo: false});
<add> };
<add>
<add> componentWillUnmount() {
<add> if (this.timeout) {
<add> clearTimeout(this.interval);
<add> }
<add> }
<add>
<add> render() {
<add> const {react15, react16, hasSameBehavior} = this.props;
<add> const style = {
<add> position: 'absolute',
<add> width: '100%',
<add> height: '100%',
<add> };
<add>
<add> let highlight = null;
<add> let popover = null;
<add> if (this.state.showInfo) {
<add> highlight = (
<add> <div
<add> css={{
<add> position: 'absolute',
<add> height: '100%',
<add> width: '100%',
<add> border: '2px solid blue',
<add> }}
<add> />
<add> );
<add>
<add> popover = (
<add> <div
<add> css={{
<add> backgroundColor: 'white',
<add> border: '1px solid black',
<add> position: 'absolute',
<add> top: '100%',
<add> zIndex: 999,
<add> }}>
<add> <ResultPopover {...this.props} />
<add> </div>
<add> );
<add> }
<add>
<add> if (!hasSameBehavior) {
<add> style.border = '4px solid purple';
<add> }
<add> return (
<ide> <div
<del> css={{position: 'absolute', width: '50%', left: '50%', height: '100%'}}>
<del> <RendererResult version={16} {...react16} />
<add> css={style}
<add> onMouseEnter={this.onMouseEnter}
<add> onMouseLeave={this.onMouseLeave}>
<add> <div css={{position: 'absolute', width: '50%', height: '100%'}}>
<add> <RendererResult version={15} {...react15} />
<add> </div>
<add> <div
<add> css={{
<add> position: 'absolute',
<add> width: '50%',
<add> left: '50%',
<add> height: '100%',
<add> }}>
<add> <RendererResult version={16} {...react16} />
<add> </div>
<add> {highlight}
<add> {popover}
<ide> </div>
<del> </div>
<del> );
<add> );
<add> }
<ide> }
<ide>
<ide> function ColumnHeader({children}) { | 1 |
Javascript | Javascript | get showupcomingchange from env.json | c6aa6ddbcdff2ed69c4cf69f15dc72684a5381fc | <ide><path>curriculum/getChallenges.js
<ide> const { helpCategoryMap } = require('../client/utils/challengeTypes');
<ide> const { curriculum: curriculumLangs } =
<ide> require('../config/i18n/all-langs').availableLangs;
<ide>
<add>const { showUpcomingChanges } = require('../config/env.json');
<add>
<ide> const access = util.promisify(fs.access);
<ide>
<ide> const challengesDir = path.resolve(__dirname, './challenges');
<ide> async function buildBlocks({ basename: blockName }, curriculum, superBlock) {
<ide> );
<ide> }
<ide>
<del> if (!isUpcomingChange || process.env.SHOW_UPCOMING_CHANGES === 'true') {
<add> if (!isUpcomingChange || showUpcomingChanges) {
<ide> // add the block to the superBlock
<ide> const blockInfo = { meta: blockMeta, challenges: [] };
<ide> curriculum[superBlock].blocks[blockName] = blockInfo; | 1 |
Text | Text | fix syntax typo in readme.md | 6971556ab83a5a3edd2f99d322b0954499393d2b | <ide><path>README.md
<ide> pip install transformers
<ide> Here also, you first need to install one of, or both, TensorFlow 2.0 and PyTorch.
<ide> Please refere to [TensorFlow installation page](https://www.tensorflow.org/install/pip#tensorflow-2.0-rc-is-available) and/or [PyTorch installation page](https://pytorch.org/get-started/locally/#start-locally) regarding the specific install command for your platform.
<ide>
<del>When TensorFlow 2.0 and/or PyTorch has been installed, you can install from source by cloning the repository and runing:
<add>When TensorFlow 2.0 and/or PyTorch has been installed, you can install from source by cloning the repository and running:
<ide>
<ide> ```bash
<ide> pip install [--editable] .
<ide> ```
<ide>
<ide> ### Tests
<ide>
<del>A series of tests is included for the library and the example scripts. Library tests can be found in the [tests folder](https://github.com/huggingface/transformers/tree/master/transformers/tests) and examples tests in the [examples folder](https://github.com/huggingface/transformers/tree/master/examples).
<add>A series of tests are included for the library and the example scripts. Library tests can be found in the [tests folder](https://github.com/huggingface/transformers/tree/master/transformers/tests) and examples tests in the [examples folder](https://github.com/huggingface/transformers/tree/master/examples).
<ide>
<ide> These tests can be run using `pytest` (install pytest if needed with `pip install pytest`).
<ide>
<ide> This is the model provided as `bert-large-uncased-whole-word-masking-finetuned-s
<ide> ### `run_generation.py`: Text generation with GPT, GPT-2, Transformer-XL and XLNet
<ide>
<ide> A conditional generation script is also included to generate text from a prompt.
<del>The generation script includes the [tricks](https://github.com/rusiaaman/XLNet-gen#methodology) proposed by Aman Rusia to get high quality generation with memory models like Transformer-XL and XLNet (include a predefined text to make short inputs longer).
<add>The generation script includes the [tricks](https://github.com/rusiaaman/XLNet-gen#methodology) proposed by Aman Rusia to get high-quality generation with memory models like Transformer-XL and XLNet (include a predefined text to make short inputs longer).
<ide>
<ide> Here is how to run the script with the small version of OpenAI GPT-2 model:
<ide>
<ide> Here is a quick summary of what you should take care of when migrating from `pyt
<ide>
<ide> The main breaking change when migrating from `pytorch-pretrained-bert` to `transformers` is that the models forward method always outputs a `tuple` with various elements depending on the model and the configuration parameters.
<ide>
<del>The exact content of the tuples for each model are detailed in the models' docstrings and the [documentation](https://huggingface.co/transformers/).
<add>The exact content of the tuples for each model is detailed in the models' docstrings and the [documentation](https://huggingface.co/transformers/).
<ide>
<ide> In pretty much every case, you will be fine by taking the first element of the output as the output you previously used in `pytorch-pretrained-bert`.
<ide>
<ide> By enabling the configuration option `output_hidden_states`, it was possible to
<ide>
<ide> ### Serialization
<ide>
<del>Breaking change in the `from_pretrained()`method:
<add>Breaking change in the `from_pretrained()` method:
<ide>
<ide> 1. Models are now set in evaluation mode by default when instantiated with the `from_pretrained()` method. To train them don't forget to set them back in training mode (`model.train()`) to activate the dropout modules.
<ide>
<ide> for batch in train_data:
<ide>
<ide> ## Citation
<ide>
<del>At the moment, there is no paper associated to Transformers but we are working on preparing one. In the meantime, please include a mention of the library and a link to the present repository if you use this work in a published or open-source project.
<add>At the moment, there is no paper associated with Transformers but we are working on preparing one. In the meantime, please include a mention of the library and a link to the present repository if you use this work in a published or open-source project. | 1 |
Mixed | Python | add flax image captioning example | 9f89fa02ed3fbb137ed8fce1d0ab196a07dc1141 | <ide><path>examples/flax/image-captioning/README.md
<add># Image Captioning (vision-encoder-text-decoder model) training example
<add>
<add>The following example showcases how to finetune a vision-encoder-text-decoder model for image captioning
<add>using the JAX/Flax backend, leveraging 🤗 Transformers library's [FlaxVisionEncoderDecoderModel](https://huggingface.co/docs/transformers/model_doc/visionencoderdecoder#transformers.FlaxVisionEncoderDecoderModel).
<add>
<add>JAX/Flax allows you to trace pure functions and compile them into efficient, fused accelerator code on both GPU and TPU.
<add>Models written in JAX/Flax are **immutable** and updated in a purely functional
<add>way which enables simple and efficient model parallelism.
<add>
<add>`run_image_captioning_flax.py` is a lightweight example of how to download and preprocess a dataset from the 🤗 Datasets
<add>library or use your own files (jsonlines or csv), then fine-tune one of the architectures above on it.
<add>
<add>For custom datasets in `jsonlines` format, please see https://huggingface.co/docs/datasets/loading_datasets.html#json-files; you will also find examples of these below.
<add>
<add>### Download COCO dataset (2017)
<add>This example uses the COCO dataset (2017) through a custom dataset script, which requires users to manually download the
<add>COCO dataset before training.
<add>
<add>```bash
<add>mkdir data
<add>cd data
<add>wget http://images.cocodataset.org/zips/train2017.zip
<add>wget http://images.cocodataset.org/zips/val2017.zip
<add>wget http://images.cocodataset.org/zips/test2017.zip
<add>wget http://images.cocodataset.org/annotations/annotations_trainval2017.zip
<add>wget http://images.cocodataset.org/annotations/image_info_test2017.zip
<add>cd ..
<add>```
<add>
<add>### Create a model from a vision encoder model and a text decoder model
<add>Next, we create a [FlaxVisionEncoderDecoderModel](https://huggingface.co/docs/transformers/model_doc/visionencoderdecoder#transformers.FlaxVisionEncoderDecoderModel) instance from a pre-trained vision encoder ([ViT](https://huggingface.co/docs/transformers/model_doc/vit#transformers.FlaxViTModel)) and a pre-trained text decoder ([GPT2](https://huggingface.co/docs/transformers/model_doc/gpt2#transformers.FlaxGPT2Model)):
<add>
<add>```bash
<add>python3 create_model_from_encoder_decoder_models.py \
<add> --output_dir model \
<add> --encoder_model_name_or_path google/vit-base-patch16-224-in21k \
<add> --decoder_model_name_or_path gpt2
<add>```
<add>
<add>### Train the model
<add>Finally, we can run the example script to train the model:
<add>
<add>```bash
<add>python3 run_image_captioning_flax.py \
<add> --output_dir ./image-captioning-training-results \
<add> --model_name_or_path model \
<add> --dataset_name ydshieh/coco_dataset_script \
<add> --dataset_config_name=2017 \
<add> --data_dir $PWD/data \
<add> --image_column image_path \
<add> --caption_column caption \
<add> --do_train --do_eval --predict_with_generate \
<add> --num_train_epochs 1 \
<add> --eval_steps 500 \
<add> --learning_rate 3e-5 --warmup_steps 0 \
<add> --per_device_train_batch_size 32 \
<add> --per_device_eval_batch_size 32 \
<add> --overwrite_output_dir \
<add> --max_target_length 32 \
<add> --num_beams 8 \
<add> --preprocessing_num_workers 16 \
<add> --logging_steps 10 \
<add> --block_size 16384 \
<add> --push_to_hub
<add>```
<add>
<add>This should finish in about 1h30 on a Cloud TPU, with a validation loss of 2.0153 and a ROUGE2 score of 14.64
<add>after 1 epoch. Training statistics can be accessed on [Models](https://huggingface.co/ydshieh/image-captioning-training-results/tensorboard).
<ide><path>examples/flax/image-captioning/create_model_from_encoder_decoder_models.py
<add>#!/usr/bin/env python
<add># coding=utf-8
<add># Copyright 2022 The HuggingFace Team All rights reserved.
<add>#
<add># Licensed under the Apache License, Version 2.0 (the "License");
<add># you may not use this file except in compliance with the License.
<add># You may obtain a copy of the License at
<add>#
<add># http://www.apache.org/licenses/LICENSE-2.0
<add>#
<add># Unless required by applicable law or agreed to in writing, software
<add># distributed under the License is distributed on an "AS IS" BASIS,
<add># WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
<add># See the License for the specific language governing permissions and
<add># limitations under the License.
<add>"""
<add>Create a VisionEncoderDecoderModel instance from pretrained encoder/decoder models.
<add>
<add>The cross-attention will be randomly initialized.
<add>"""
<add>
<add>from dataclasses import dataclass, field
<add>from typing import Optional
<add>
<add>from transformers import (
<add> AutoConfig,
<add> AutoFeatureExtractor,
<add> AutoTokenizer,
<add> FlaxVisionEncoderDecoderModel,
<add> HfArgumentParser,
<add>)
<add>
<add>
<add>@dataclass
<add>class ModelArguments:
<add>    """
<add>    Arguments selecting the pretrained encoder/decoder checkpoints (and optional config
<add>    overrides) to combine into a VisionEncoderDecoderModel, and where to write the result.
<add>    """
<add>
<add>    # Directory where the combined model, feature extractor and tokenizer are saved.
<add>    output_dir: str = field(
<add>        metadata={"help": "The output directory where the model will be written."},
<add>    )
<add>    # NOTE(review): the help text says "Don't set if ...", but this field has no default and is
<add>    # therefore required by HfArgumentParser — confirm the intended behavior.
<add>    encoder_model_name_or_path: str = field(
<add>        metadata={
<add>            "help": "The encoder model checkpoint for weights initialization."
<add>            "Don't set if you want to train an encoder model from scratch."
<add>        },
<add>    )
<add>    # NOTE(review): same remark as for encoder_model_name_or_path — required despite the help text.
<add>    decoder_model_name_or_path: str = field(
<add>        metadata={
<add>            "help": "The decoder model checkpoint for weights initialization."
<add>            "Don't set if you want to train a decoder model from scratch."
<add>        },
<add>    )
<add>    # Optional explicit config for the encoder; falls back to the encoder checkpoint's own config.
<add>    encoder_config_name: Optional[str] = field(
<add>        default=None, metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"}
<add>    )
<add>    # Optional explicit config for the decoder; falls back to the decoder checkpoint's own config.
<add>    decoder_config_name: Optional[str] = field(
<add>        default=None, metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"}
<add>    )
<add>
<add>
<add>def main():
<add>    """Build a FlaxVisionEncoderDecoderModel from a pretrained vision encoder and a
<add>    pretrained text decoder, then save the model, feature extractor and tokenizer to
<add>    ``output_dir``. The encoder-decoder cross-attention weights are randomly initialized.
<add>    """
<add>    parser = HfArgumentParser((ModelArguments,))
<add>    (model_args,) = parser.parse_args_into_dataclasses()
<add>
<add>    # Load pretrained model and tokenizer
<add>
<add>    # Use explicit specified encoder config
<add>    if model_args.encoder_config_name:
<add>        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name)
<add>    # Use pretrained encoder model's config
<add>    else:
<add>        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path)
<add>
<add>    # Use explicit specified decoder config
<add>    if model_args.decoder_config_name:
<add>        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name)
<add>    # Use pretrained decoder model's config
<add>    else:
<add>        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path)
<add>
<add>    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
<add>    decoder_config.is_decoder = True
<add>    decoder_config.add_cross_attention = True
<add>
<add>    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
<add>        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path,
<add>        decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path,
<add>        encoder_config=encoder_config,
<add>        decoder_config=decoder_config,
<add>    )
<add>
<add>    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
<add>    decoder_start_token_id = decoder_config.decoder_start_token_id
<add>    pad_token_id = decoder_config.pad_token_id
<add>    if decoder_start_token_id is None:
<add>        # Fall back to bos as the decoder start token when the decoder config defines none.
<add>        decoder_start_token_id = decoder_config.bos_token_id
<add>    if pad_token_id is None:
<add>        # Fall back to eos for padding when the decoder config defines no pad token.
<add>        pad_token_id = decoder_config.eos_token_id
<add>
<add>    # This is necessary to make Flax's generate() work
<add>    model.config.eos_token_id = decoder_config.eos_token_id
<add>    model.config.decoder_start_token_id = decoder_start_token_id
<add>    model.config.pad_token_id = pad_token_id
<add>
<add>    feature_extractor = AutoFeatureExtractor.from_pretrained(model_args.encoder_model_name_or_path)
<add>
<add>    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path)
<add>    # Keep the tokenizer's pad token in sync with the pad token id chosen above.
<add>    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id)
<add>
<add>    # Persist everything needed to reload the model for training/inference.
<add>    model.save_pretrained(model_args.output_dir)
<add>    feature_extractor.save_pretrained(model_args.output_dir)
<add>    tokenizer.save_pretrained(model_args.output_dir)
<add>
# Run only when executed as a script, not on import.
if __name__ == "__main__":
    main()
<ide><path>examples/flax/image-captioning/run_image_captioning_flax.py
<add>#!/usr/bin/env python
<add># coding=utf-8
<add># Copyright 2022 The HuggingFace Team All rights reserved.
<add>#
<add># Licensed under the Apache License, Version 2.0 (the "License");
<add># you may not use this file except in compliance with the License.
<add># You may obtain a copy of the License at
<add>#
<add># http://www.apache.org/licenses/LICENSE-2.0
<add>#
<add># Unless required by applicable law or agreed to in writing, software
<add># distributed under the License is distributed on an "AS IS" BASIS,
<add># WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
<add># See the License for the specific language governing permissions and
<add># limitations under the License.
<add>"""
<add>Fine-tuning the library vision-encoder-decoder models for image captioning.
<add>"""
<add>
<add>import json
<add>import logging
<add>import os
<add>import sys
<add>import time
<add>from dataclasses import asdict, dataclass, field
<add>from enum import Enum
<add>from functools import partial
<add>from pathlib import Path
<add>from typing import Callable, Optional
<add>
<add>import datasets
<add>import nltk # Here to have a nice missing dependency error message early on
<add>import numpy as np
<add>from datasets import Dataset, load_dataset, load_metric
<add>from PIL import Image
<add>from tqdm import tqdm
<add>
<add>import jax
<add>import jax.numpy as jnp
<add>import optax
<add>import transformers
<add>from filelock import FileLock
<add>from flax import jax_utils, traverse_util
<add>from flax.jax_utils import unreplicate
<add>from flax.training import train_state
<add>from flax.training.common_utils import get_metrics, onehot, shard, shard_prng_key
<add>from huggingface_hub import Repository
<add>from transformers import (
<add> AutoFeatureExtractor,
<add> AutoTokenizer,
<add> FlaxVisionEncoderDecoderModel,
<add> HfArgumentParser,
<add> is_tensorboard_available,
<add>)
<add>from transformers.file_utils import get_full_repo_name, is_offline_mode
<add>
<add>
<add>logger = logging.getLogger(__name__)
<add>
# Ensure NLTK's "punkt" sentence tokenizer is available (needed for ROUGE
# post-processing). Download it under a file lock so concurrent processes do
# not race; in offline mode we can only fail early with instructions.
try:
    nltk.data.find("tokenizers/punkt")
except (LookupError, OSError):
    if is_offline_mode():
        raise LookupError(
            "Offline mode: run this script without TRANSFORMERS_OFFLINE first to download nltk data files"
        )
    # The lock object itself is never used, so no `as` binding is needed.
    with FileLock(".lock"):
        nltk.download("punkt", quiet=True)
<add>
<add>
# Copied from transformers.models.bart.modeling_flax_bart.shift_tokens_right
def shift_tokens_right(input_ids: np.ndarray, pad_token_id: int, decoder_start_token_id: int) -> np.ndarray:
    """
    Shift input ids one token to the right.
    """
    shifted = np.empty_like(input_ids)
    shifted[:, 0] = decoder_start_token_id
    shifted[:, 1:] = input_ids[:, :-1]

    # Labels use -100 as the ignore index; decoder inputs need real pad tokens instead.
    return np.where(shifted == -100, pad_token_id, shifted)
<add>
<add>
@dataclass
class TrainingArguments:
    """Command-line training options for this script (parsed with `HfArgumentParser`)."""

    # Required: where checkpoints/predictions are written (`~` expanded in `__post_init__`).
    output_dir: str = field(
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."},
    )
    overwrite_output_dir: bool = field(
        default=False,
        metadata={
            "help": (
                "Overwrite the content of the output directory. "
                "Use this to continue training if output_dir points to a checkpoint directory."
            )
        },
    )
    do_train: bool = field(default=False, metadata={"help": "Whether to run training."})
    do_eval: bool = field(default=False, metadata={"help": "Whether to run eval on the dev set."})
    do_predict: bool = field(default=False, metadata={"help": "Whether to run predictions on the test set."})
    per_device_train_batch_size: int = field(
        default=8, metadata={"help": "Batch size per GPU/TPU core/CPU for training."}
    )
    per_device_eval_batch_size: int = field(
        default=8, metadata={"help": "Batch size per GPU/TPU core/CPU for evaluation."}
    )
    # Plain class attribute (no annotation, so NOT a dataclass field); only used
    # as the help text for `block_size` below.
    _block_size_doc = """
    The default value `0` will preprocess (tokenization + feature extraction) the whole dataset before training and
    cache the results. This uses more disk space, but avoids (repeated) processing time during training. This is a
    good option if your disk space is large enough to store the whole processed dataset.
    If a positive value is given, the captions in the dataset will be tokenized before training and the results are
    cached. During training, it iterates the dataset in chunks of size `block_size`. On each block, images are
    transformed by the feature extractor with the results being kept in memory (no cache), and batches of size
    `batch_size` are yielded before processing the next block. This could avoid the heavy disk usage when the
    dataset is large.
    """
    block_size: int = field(default=0, metadata={"help": _block_size_doc})
    learning_rate: float = field(default=5e-5, metadata={"help": "The initial learning rate for AdamW."})
    weight_decay: float = field(default=0.0, metadata={"help": "Weight decay for AdamW if we apply some."})
    adam_beta1: float = field(default=0.9, metadata={"help": "Beta1 for AdamW optimizer"})
    adam_beta2: float = field(default=0.999, metadata={"help": "Beta2 for AdamW optimizer"})
    adam_epsilon: float = field(default=1e-8, metadata={"help": "Epsilon for AdamW optimizer."})
    label_smoothing_factor: float = field(
        default=0.0, metadata={"help": "The label smoothing epsilon to apply (zero means no label smoothing)."}
    )
    num_train_epochs: float = field(default=3.0, metadata={"help": "Total number of training epochs to perform."})
    warmup_steps: int = field(default=0, metadata={"help": "Linear warmup over warmup_steps."})
    logging_steps: int = field(default=500, metadata={"help": "Log every X updates steps."})
    eval_steps: int = field(default=None, metadata={"help": "Run an evaluation every X steps."})
    seed: int = field(default=42, metadata={"help": "Random seed that will be set at the beginning of training."})
    push_to_hub: bool = field(
        default=False, metadata={"help": "Whether or not to upload the trained model to the model hub after training."}
    )
    hub_model_id: str = field(
        default=None, metadata={"help": "The name of the repository to keep in sync with the local `output_dir`."}
    )
    hub_token: str = field(default=None, metadata={"help": "The token to use to push to the Model Hub."})

    def __post_init__(self):
        # Expand `~`/`~user` so downstream path handling sees a real filesystem path.
        if self.output_dir is not None:
            self.output_dir = os.path.expanduser(self.output_dir)

    def to_dict(self):
        """
        Serializes this instance while replace `Enum` by their values (for JSON serialization support). It obfuscates
        the token values by removing their value.
        """
        d = asdict(self)
        for k, v in d.items():
            if isinstance(v, Enum):
                d[k] = v.value
            if isinstance(v, list) and len(v) > 0 and isinstance(v[0], Enum):
                d[k] = [x.value for x in v]
            # Never leak secrets (e.g. `hub_token`) into logs or serialized configs.
            if k.endswith("_token"):
                d[k] = f"<{k.upper()}>"
        return d
<add>
<add>
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    # Required: hub id or local path of the vision-encoder-decoder checkpoint.
    model_name_or_path: str = field(
        metadata={"help": "The model checkpoint for weights initialization."},
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    # Passed to the model as `getattr(jnp, dtype)` later in `main`.
    dtype: Optional[str] = field(
        default="float32",
        metadata={
            "help": "Floating-point format in which the model weights should be initialized and trained. Choose one of `[float32, float16, bfloat16]`."
        },
    )
<add>
<add>
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    data_dir: Optional[str] = field(
        default=None, metadata={"help": "The data directory of the dataset to use (via the datasets library)."}
    )
    image_column: Optional[str] = field(
        default=None,
        metadata={"help": "The name of the column in the datasets containing the full image file paths."},
    )
    caption_column: Optional[str] = field(
        default=None,
        metadata={"help": "The name of the column in the datasets containing the image captions."},
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    test_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input predict data file to do prediction on (a text file)."},
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            "help": "The maximum total sequence length for target text after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        },
    )
    val_max_target_length: Optional[int] = field(
        default=None,
        metadata={
            "help": "The maximum total sequence length for validation target text after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded. Will default to `max_target_length`."
            "This argument is also used to override the `max_length` param of `model.generate`, which is used "
            "during evaluation."
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of training examples to this "
            "value if set."
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
            "value if set."
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of prediction examples to this "
            "value if set."
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": "Number of beams to use for evaluation. This argument will be passed to `model.generate`, "
            "which is used during evaluation."
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )

    def __post_init__(self):
        """Validate argument combinations and derive dependent defaults.

        Raises:
            ValueError: if neither a dataset name nor a train/validation file is
                given, or if a provided data file is not a csv/json file.
        """
        if self.dataset_name is None and self.train_file is None and self.validation_file is None:
            raise ValueError("Need either a dataset name or a training/validation file.")
        # Use explicit raises instead of `assert` so validation survives `python -O`.
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            if extension not in ["csv", "json"]:
                raise ValueError("`train_file` should be a csv or a json file.")
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            if extension not in ["csv", "json"]:
                raise ValueError("`validation_file` should be a csv or a json file.")
        if self.val_max_target_length is None:
            self.val_max_target_length = self.max_target_length
<add>
<add>
# Maps a dataset (loading-script) name to its (image column, caption column) pair;
# used as a fallback when --image_column / --caption_column are not given.
image_captioning_name_mapping = {
    "image_caption_dataset.py": ("image_path", "caption"),
}
<add>
<add>
class TrainState(train_state.TrainState):
    # PRNG key for dropout, carried inside the train state so it can be
    # replicated/sharded together with params and optimizer state.
    dropout_rng: jnp.ndarray

    def replicate(self):
        """Replicate the state across local devices, sharding the dropout PRNG key per device."""
        replicated = jax_utils.replicate(self)
        return replicated.replace(dropout_rng=shard_prng_key(self.dropout_rng))
<add>
<add>
def data_loader(rng: jax.random.PRNGKey, dataset: Dataset, batch_size: int, shuffle: bool = False):
    """
    Returns batches of size `batch_size` from truncated `dataset`, sharded over all local devices.
    Shuffle batches if `shuffle` is `True`.
    """
    # We use `numpy.ndarray` to interact with `datasets.Dataset`, since using `jax.numpy.array` to index into a
    # dataset is significantly slow. Using JAX array at the 1st place is only to keep JAX's PRNGs generation
    # mechanism, which works differently from NumPy/SciPy.
    if shuffle:
        order = np.asarray(jax.random.permutation(rng, len(dataset)))
    else:
        order = np.arange(len(dataset))

    num_batches = len(dataset) // batch_size  # the trailing incomplete batch is dropped

    for step in range(num_batches):
        picked = order[step * batch_size : (step + 1) * batch_size]
        yield shard(dataset[picked])
<add>
<add>
def write_metric(summary_writer, metrics, train_time, step, metric_key_prefix="train"):
    """Log metrics to a TensorBoard summary writer.

    If `train_time` is truthy, `metrics` is a list of per-step metric dicts:
    it is stacked with `get_metrics` and every recorded value is logged at the
    step it was produced. Otherwise `metrics` is a flat dict of aggregated
    scalars (eval/predict) logged once at `step`.
    """
    if train_time:
        summary_writer.scalar("train_time", train_time, step)

        # Per-step training metrics: stack the replicated values, then log each
        # one at the step it was produced.
        metrics = get_metrics(metrics)
        for key, vals in metrics.items():
            tag = f"{metric_key_prefix}_{key}"
            for i, val in enumerate(vals):
                summary_writer.scalar(tag, val, step - len(vals) + i + 1)
    else:
        # Aggregated scalars. NOTE: previously this `else` was attached to the
        # `for` loop above (a `for ... else`), so it always ran and
        # `get_metrics` was applied even to flat eval dicts; it must pair with
        # `if train_time:` instead.
        for metric_name, value in metrics.items():
            summary_writer.scalar(f"{metric_key_prefix}_{metric_name}", value, step)
<add>
<add>
def create_learning_rate_fn(
    train_ds_size: int, train_batch_size: int, num_train_epochs: int, num_warmup_steps: int, learning_rate: float
) -> Callable[[int], jnp.array]:
    """Returns a linear warmup, linear_decay learning rate function."""
    total_steps = (train_ds_size // train_batch_size) * num_train_epochs
    # Ramp linearly from 0 up to `learning_rate`, then decay linearly back to 0
    # over the remaining steps.
    warmup = optax.linear_schedule(init_value=0.0, end_value=learning_rate, transition_steps=num_warmup_steps)
    decay = optax.linear_schedule(
        init_value=learning_rate, end_value=0, transition_steps=total_steps - num_warmup_steps
    )
    return optax.join_schedules(schedules=[warmup, decay], boundaries=[num_warmup_steps])
<add>
<add>
<add>def main():
<add> # See all possible arguments in src/transformers/training_args.py
<add> # or by passing the --help flag to this script.
<add> # We now keep distinct sets of args, for a cleaner separation of concerns.
<add>
<add> parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
<add> if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
<add> # If we pass only one argument to the script and it's the path to a json file,
<add> # let's parse it to get our arguments.
<add> model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
<add> else:
<add> model_args, data_args, training_args = parser.parse_args_into_dataclasses()
<add>
<add> if (
<add> os.path.exists(training_args.output_dir)
<add> and os.listdir(training_args.output_dir)
<add> and training_args.do_train
<add> and not training_args.overwrite_output_dir
<add> ):
<add> raise ValueError(
<add> f"Output directory ({training_args.output_dir}) already exists and is not empty."
<add> "Use --overwrite_output_dir to overcome."
<add> )
<add>
<add> # Make one log on every process with the configuration for debugging.
<add> logging.basicConfig(
<add> format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
<add> datefmt="%m/%d/%Y %H:%M:%S",
<add> level=logging.INFO,
<add> )
<add> # Setup logging, we only want one process per machine to log things on the screen.
<add> logger.setLevel(logging.INFO if jax.process_index() == 0 else logging.ERROR)
<add> if jax.process_index() == 0:
<add> datasets.utils.logging.set_verbosity_warning()
<add> transformers.utils.logging.set_verbosity_info()
<add> else:
<add> datasets.utils.logging.set_verbosity_error()
<add> transformers.utils.logging.set_verbosity_error()
<add>
<add> # Set the verbosity to info of the Transformers logger (on main process only):
<add> logger.info(f"Training/evaluation parameters {training_args}")
<add>
<add> # Handle the repository creation
<add> if training_args.push_to_hub:
<add> if training_args.hub_model_id is None:
<add> repo_name = get_full_repo_name(
<add> Path(training_args.output_dir).absolute().name, token=training_args.hub_token
<add> )
<add> else:
<add> repo_name = training_args.hub_model_id
<add> repo = Repository(training_args.output_dir, clone_from=repo_name)
<add>
<add> # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
<add> # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
<add> # (the dataset will be downloaded automatically from the datasets Hub).
<add> #
<add> # For CSV/JSON files this script will use the first column for the full image path and the second column for the
<add> # captions (unless you specify column names for this with the `image_column` and `caption_column` arguments).
<add> #
<add> if data_args.dataset_name is not None:
<add> # Downloading and loading a dataset from the hub.
<add> dataset = load_dataset(
<add> data_args.dataset_name,
<add> data_args.dataset_config_name,
<add> cache_dir=model_args.cache_dir,
<add> keep_in_memory=False,
<add> data_dir=data_args.data_dir,
<add> )
<add> else:
<add> data_files = {}
<add> if data_args.train_file is not None:
<add> data_files["train"] = data_args.train_file
<add> extension = data_args.train_file.split(".")[-1]
<add> if data_args.validation_file is not None:
<add> data_files["validation"] = data_args.validation_file
<add> extension = data_args.validation_file.split(".")[-1]
<add> if data_args.test_file is not None:
<add> data_files["test"] = data_args.test_file
<add> extension = data_args.test_file.split(".")[-1]
<add> dataset = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)
<add> # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
<add> # https://huggingface.co/docs/datasets/loading_datasets.html.
<add>
<add> # Load pretrained model and tokenizer
<add> model = FlaxVisionEncoderDecoderModel.from_pretrained(
<add> model_args.model_name_or_path,
<add> seed=training_args.seed,
<add> dtype=getattr(jnp, model_args.dtype),
<add> )
<add> feature_extractor = AutoFeatureExtractor.from_pretrained(
<add> model_args.model_name_or_path, cache_dir=model_args.cache_dir
<add> )
<add> tokenizer = AutoTokenizer.from_pretrained(
<add> model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer
<add> )
<add> tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id)
<add>
<add> # Preprocessing the datasets.
<add> # We need to tokenize inputs and targets.
<add> if training_args.do_train:
<add> column_names = dataset["train"].column_names
<add> elif training_args.do_eval:
<add> column_names = dataset["validation"].column_names
<add> elif training_args.do_predict:
<add> column_names = dataset["test"].column_names
<add> else:
<add> logger.info("There is nothing to do. Please pass `do_train`, `do_eval` and/or `do_predict`.")
<add> return
<add>
<add> # Get the column names for input/target.
<add> dataset_columns = image_captioning_name_mapping.get(data_args.dataset_name, None)
<add> if data_args.image_column is None:
<add> assert dataset_columns is not None
<add> image_column = dataset_columns[0]
<add> else:
<add> image_column = data_args.image_column
<add> if image_column not in column_names:
<add> raise ValueError(
<add> f"--image_column' value '{data_args.image_column}' needs to be one of: {', '.join(column_names)}"
<add> )
<add> if data_args.caption_column is None:
<add> assert dataset_columns is not None
<add> caption_column = dataset_columns[1]
<add> else:
<add> caption_column = data_args.caption_column
<add> if caption_column not in column_names:
<add> raise ValueError(
<add> f"--caption_column' value '{data_args.caption_column}' needs to be one of: {', '.join(column_names)}"
<add> )
<add>
<add> # In Flax, for seq2seq models we need to pass `decoder_input_ids`
<add> # as the Flax models don't accept `labels`, we need to prepare the decoder_input_ids here
<add> # for that dynamically import the `shift_tokens_right` function from the model file
<add> model_module = __import__(model.__module__, fromlist=["shift_tokens_right"])
<add> shift_tokens_right_fn = getattr(model_module, "shift_tokens_right", shift_tokens_right)
<add>
<add> def filter_fn(examples):
<add> """remove problematic images"""
<add>
<add> bools = []
<add> for image_file in examples[image_column]:
<add> try:
<add> image = Image.open(image_file)
<add> feature_extractor(images=image, return_tensors="np")
<add> bools.append(True)
<add> except Exception:
<add> bools.append(False)
<add>
<add> return bools
<add>
    # Setting padding="max_length" as we need fixed length inputs for jitted functions
    def tokenization_fn(examples, max_target_length):
        """Run tokenization on captions."""

        # Lower-case captions and append the EOS token so generation has an explicit stop.
        captions = []
        for caption in examples[caption_column]:
            captions.append(caption.lower() + " " + tokenizer.eos_token)
        targets = captions

        model_inputs = {}
        # Setup the tokenizer for targets
        with tokenizer.as_target_tokenizer():
            labels = tokenizer(
                targets, max_length=max_target_length, padding="max_length", truncation=True, return_tensors="np"
            )
        model_inputs["labels"] = labels["input_ids"]
        # Flax seq2seq models take `decoder_input_ids` instead of `labels`; build them
        # by shifting the label ids right.
        decoder_input_ids = shift_tokens_right_fn(
            labels["input_ids"], model.config.pad_token_id, model.config.decoder_start_token_id
        )
        model_inputs["decoder_input_ids"] = np.asarray(decoder_input_ids)
        # We need decoder_attention_mask so we can ignore pad tokens from loss
        model_inputs["decoder_attention_mask"] = labels["attention_mask"]
        # Keep the image-path column so feature extraction can run later
        # (it is dropped again in `preprocess_fn`).
        model_inputs[image_column] = examples[image_column]

        return model_inputs
<add>
    def feature_extraction_fn(examples, check_image=True):
        """
        Run feature extraction on images

        If `check_image` is `True`, the examples that fails during `Image.open()` will be caught and discarded.
        Otherwise, an exception will be thrown.
        """

        model_inputs = {}

        if check_image:
            # Open each image, remembering which rows succeeded.
            images = []
            to_keep = []
            for image_file in examples[image_column]:
                try:
                    img = Image.open(image_file)
                    images.append(img)
                    to_keep.append(True)
                except Exception:
                    to_keep.append(False)

            # Drop the rows whose image failed to open. Boolean-mask indexing
            # `v[to_keep]` presumes the other columns are numpy arrays (as
            # produced by `tokenization_fn`) — NOTE(review): confirm no plain
            # Python lists reach this path.
            for k, v in examples.items():
                if k != image_column:
                    model_inputs[k] = v[to_keep]
        else:
            images = [Image.open(image_file) for image_file in examples[image_column]]

        encoder_inputs = feature_extractor(images=images, return_tensors="np")
        model_inputs["pixel_values"] = encoder_inputs.pixel_values

        return model_inputs
<add>
    def preprocess_fn(examples, max_target_length, check_image=True):
        """Run tokenization + image feature extraction"""

        model_inputs = {}
        # This contains image path column
        model_inputs.update(tokenization_fn(examples, max_target_length))
        # Feature extraction reads (and, with `check_image`, filters on) the image-path column.
        model_inputs.update(feature_extraction_fn(model_inputs, check_image=check_image))
        # Remove image path column
        model_inputs.pop(image_column)

        return model_inputs
<add>
<add> features = datasets.Features(
<add> {
<add> "pixel_values": datasets.Array3D(
<add> shape=(
<add> getattr(model.config.encoder, "num_channels", 3),
<add> model.config.encoder.image_size,
<add> model.config.encoder.image_size,
<add> ),
<add> dtype="float32",
<add> ),
<add> "labels": datasets.Sequence(feature=datasets.Value(dtype="int32", id=None), length=-1, id=None),
<add> "decoder_input_ids": datasets.Sequence(feature=datasets.Value(dtype="int32", id=None), length=-1, id=None),
<add> "decoder_attention_mask": datasets.Sequence(
<add> feature=datasets.Value(dtype="int32", id=None), length=-1, id=None
<add> ),
<add> }
<add> )
<add>
<add> # If `block_size` is `0`, tokenization & image feature extraction is done at the beginning
<add> run_feat_ext_at_beginning = training_args.block_size == 0
<add> # Used in .map() below
<add> function_kwarg = preprocess_fn if run_feat_ext_at_beginning else tokenization_fn
<add> # `features` is used only for the final preprocessed dataset (for the performance purpose).
<add> features_kwarg = features if run_feat_ext_at_beginning else None
<add> # Keep `image_column` if the feature extraction is done during training
<add> remove_columns_kwarg = [x for x in column_names if x != image_column or run_feat_ext_at_beginning]
<add> processor_names = "tokenizer and feature extractor" if run_feat_ext_at_beginning else "tokenizer"
<add>
<add> # Store some constant
<add> train_batch_size = int(training_args.per_device_train_batch_size) * jax.device_count()
<add> eval_batch_size = int(training_args.per_device_eval_batch_size) * jax.device_count()
<add> if training_args.block_size % train_batch_size > 0 or training_args.block_size % eval_batch_size > 0:
<add> raise ValueError(
<add> f"`training_args.block_size` needs to be a multiple of the global train/eval batch size."
<add> f"Got {training_args.block_size}, {train_batch_size} and {eval_batch_size} respectively instead."
<add> )
<add>
<add> if training_args.do_train:
<add> if "train" not in dataset:
<add> raise ValueError("--do_train requires a train dataset")
<add> train_dataset = dataset["train"]
<add> if data_args.max_train_samples is not None:
<add> train_dataset = train_dataset.select(range(data_args.max_train_samples))
<add> # remove problematic examples
<add> # (if feature extraction is performed at the beginning, the filtering is done during preprocessing below
<add> # instead here.)
<add> if not run_feat_ext_at_beginning:
<add> train_dataset = train_dataset.filter(filter_fn, batched=True, num_proc=data_args.preprocessing_num_workers)
<add> train_dataset = train_dataset.map(
<add> function=function_kwarg,
<add> batched=True,
<add> num_proc=data_args.preprocessing_num_workers,
<add> # kept image paths
<add> remove_columns=remove_columns_kwarg,
<add> load_from_cache_file=not data_args.overwrite_cache,
<add> desc=f"Running {processor_names} on train dataset",
<add> fn_kwargs={"max_target_length": data_args.max_target_length},
<add> features=features_kwarg,
<add> )
<add> if run_feat_ext_at_beginning:
<add> # set format (for performance) since the dataset is ready to be used
<add> train_dataset = train_dataset.with_format("numpy")
<add>
<add> steps_per_epoch = len(train_dataset) // train_batch_size
<add> num_train_examples_per_epoch = steps_per_epoch * train_batch_size
<add> num_epochs = int(training_args.num_train_epochs)
<add> total_train_steps = steps_per_epoch * num_epochs
<add> else:
<add> num_train_examples_per_epoch = 0
<add>
<add> if training_args.do_eval:
<add> if "validation" not in dataset:
<add> raise ValueError("--do_eval requires a validation dataset")
<add> eval_dataset = dataset["validation"]
<add> if data_args.max_eval_samples is not None:
<add> eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
<add> # remove problematic examples
<add> # (if feature extraction is performed at the beginning, the filtering is done during preprocessing below
<add> # instead here.)
<add> if not run_feat_ext_at_beginning:
<add> eval_dataset = eval_dataset.filter(filter_fn, batched=True, num_proc=data_args.preprocessing_num_workers)
<add> eval_dataset = eval_dataset.map(
<add> function=function_kwarg,
<add> batched=True,
<add> num_proc=data_args.preprocessing_num_workers,
<add> # kept image paths
<add> remove_columns=remove_columns_kwarg,
<add> load_from_cache_file=not data_args.overwrite_cache,
<add> desc=f"Running {processor_names} on validation dataset",
<add> fn_kwargs={"max_target_length": data_args.val_max_target_length},
<add> features=features_kwarg,
<add> )
<add> if run_feat_ext_at_beginning:
<add> # set format (for performance) since the dataset is ready to be used
<add> eval_dataset = eval_dataset.with_format("numpy")
<add>
<add> num_eval_examples = len(eval_dataset)
<add> eval_steps = num_eval_examples // eval_batch_size
<add>
<add> if training_args.do_predict:
<add> if "test" not in dataset:
<add> raise ValueError("--do_predict requires a test dataset")
<add> predict_dataset = dataset["test"]
<add> if data_args.max_predict_samples is not None:
<add> predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))
<add> # remove problematic examples
<add> # (if feature extraction is performed at the beginning, the filtering is done during preprocessing below
<add> # instead here.)
<add> if not run_feat_ext_at_beginning:
<add> predict_dataset = predict_dataset.filter(
<add> filter_fn, batched=True, num_proc=data_args.preprocessing_num_workers
<add> )
<add> predict_dataset = predict_dataset.map(
<add> function=function_kwarg,
<add> batched=True,
<add> num_proc=data_args.preprocessing_num_workers,
<add> # kept image paths
<add> remove_columns=remove_columns_kwarg,
<add> load_from_cache_file=not data_args.overwrite_cache,
<add> desc=f"Running {processor_names} on prediction dataset",
<add> fn_kwargs={"max_target_length": data_args.val_max_target_length},
<add> features=features_kwarg,
<add> )
<add> if run_feat_ext_at_beginning:
<add> # set format (for performance) since the dataset is ready to be used
<add> predict_dataset = predict_dataset.with_format("numpy")
<add>
<add> num_test_examples = len(predict_dataset)
<add> test_steps = num_test_examples // eval_batch_size
<add>
    def blockwise_data_loader(
        rng: jax.random.PRNGKey,
        ds: Dataset,
        block_size: int,
        batch_size: int,
        shuffle: bool = False,
        keep_in_memory: bool = False,
        split: str = "",
    ):
        """
        Wrap the simple `data_loader` in a block-wise way if `block_size` > 0, else it's the same as `data_loader`.

        If `block_size` > 0, it requires `ds` to have a column that gives image paths in order to perform image feature
        extraction (with the column name being specified by `image_column`). The tokenization should be done before
        training in this case.
        """

        # We use `numpy.ndarray` to interact with `datasets.Dataset`, since using `jax.numpy.array` to index into a
        # dataset is significantly slow. Using JAX array at the 1st place is only to keep JAX's PRNGs generation
        # mechanism, which works differently from NumPy/SciPy.
        if shuffle:
            indices = jax.random.permutation(rng, len(ds))
            indices = np.asarray(indices)
        else:
            indices = np.arange(len(ds))

        # With `block_size == 0` the whole dataset is one block.
        _block_size = len(ds) if not block_size else block_size

        steps_per_block = _block_size // batch_size
        num_examples = len(ds)
        steps = num_examples // batch_size
        # Ceil-divide: a trailing partial block still yields its complete batches.
        num_splits = steps // steps_per_block + int(steps % steps_per_block > 0)

        for idx in range(num_splits):

            if not block_size:
                # NOTE(review): in this path the (possibly shuffled) `indices` are
                # never applied and the inner loader is called with `shuffle=False`
                # below, so no shuffling happens when `block_size == 0` — confirm
                # this is intended.
                _ds = ds
            else:

                start_idx = block_size * idx
                end_idx = block_size * (idx + 1)

                selected_indices = indices[start_idx:end_idx]

                _ds = ds.select(selected_indices)

                # Feature-extract this block only; results are kept in memory
                # (no cache) when `keep_in_memory` is set.
                _ds = _ds.map(
                    feature_extraction_fn,
                    batched=True,
                    num_proc=data_args.preprocessing_num_workers,
                    remove_columns=[image_column],
                    load_from_cache_file=not data_args.overwrite_cache,
                    features=features,
                    keep_in_memory=keep_in_memory,
                    # The images are already checked either in `.filter()` or in `preprocess_fn()`
                    fn_kwargs={"check_image": False},
                    # NOTE(review): this `.replace(" ", " ")` is a no-op — it
                    # presumably should collapse the double space left when
                    # `split` is empty, i.e. `.replace("  ", " ")`.
                    desc=f"Running feature extraction on {split} dataset".replace(" ", " "),
                )
                _ds = _ds.with_format("numpy")

            # No need to shuffle here
            loader = data_loader(rng, _ds, batch_size=batch_size, shuffle=False)

            for batch in loader:
                yield batch
<add>
<add> # Metric
<add> metric = load_metric("rouge")
<add>
<add> def postprocess_text(preds, labels):
<add> preds = [pred.strip() for pred in preds]
<add> labels = [label.strip() for label in labels]
<add>
<add> # rougeLSum expects newline after each sentence
<add> preds = ["\n".join(nltk.sent_tokenize(pred)) for pred in preds]
<add> labels = ["\n".join(nltk.sent_tokenize(label)) for label in labels]
<add>
<add> return preds, labels
<add>
<add> def compute_metrics(preds, labels):
<add> decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)
<add> decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
<add>
<add> # Some simple post-processing
<add> decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels)
<add>
<add> result = metric.compute(predictions=decoded_preds, references=decoded_labels, use_stemmer=True)
<add> # Extract a few results from ROUGE
<add> result = {key: value.mid.fmeasure * 100 for key, value in result.items()}
<add>
<add> prediction_lens = [np.count_nonzero(pred != tokenizer.pad_token_id) for pred in preds]
<add> result["gen_len"] = np.mean(prediction_lens)
<add> result = {k: round(v, 6) for k, v in result.items()}
<add>
<add> return result, decoded_preds, decoded_labels
<add>
<add> # Enable tensorboard only on the master node
<add> has_tensorboard = is_tensorboard_available()
<add> if has_tensorboard and jax.process_index() == 0:
<add> try:
<add> from flax.metrics.tensorboard import SummaryWriter
<add>
<add> summary_writer = SummaryWriter(log_dir=Path(training_args.output_dir))
<add> except ImportError as ie:
<add> has_tensorboard = False
<add> logger.warning(
<add> f"Unable to display metrics through TensorBoard because some package are not installed: {ie}"
<add> )
<add> else:
<add> logger.warning(
<add> "Unable to display metrics through TensorBoard because the package is not installed: "
<add> "Please run pip install tensorboard to enable."
<add> )
<add>
<add> # Initialize our training
<add> rng = jax.random.PRNGKey(training_args.seed)
<add> rng, dropout_rng = jax.random.split(rng)
<add>
<add> # Create learning rate schedule
<add> linear_decay_lr_schedule_fn = create_learning_rate_fn(
<add> num_train_examples_per_epoch,
<add> train_batch_size,
<add> training_args.num_train_epochs,
<add> training_args.warmup_steps,
<add> training_args.learning_rate,
<add> )
<add>
<add> # We use Optax's "masking" functionality to not apply weight decay
<add> # to bias and LayerNorm scale parameters. decay_mask_fn returns a
<add> # mask boolean with the same structure as the parameters.
<add> # The mask is True for parameters that should be decayed.
<add> # Note that this mask is specifically adapted for FlaxBart.
<add> # For FlaxT5, one should correct the layer norm parameter naming
<add> # accordingly - see `run_t5_mlm_flax.py` e.g.
<add> def decay_mask_fn(params):
<add> flat_params = traverse_util.flatten_dict(params)
<add> layer_norm_params = [
<add> (name, "scale") for name in ["self_attn_layer_norm", "layernorm_embedding", "final_layer_norm"]
<add> ]
<add> flat_mask = {path: (path[-1] != "bias" and path[-2:] not in layer_norm_params) for path in flat_params}
<add> return traverse_util.unflatten_dict(flat_mask)
<add>
<add> # create adam optimizer
<add> adamw = optax.adamw(
<add> learning_rate=linear_decay_lr_schedule_fn,
<add> b1=training_args.adam_beta1,
<add> b2=training_args.adam_beta2,
<add> eps=training_args.adam_epsilon,
<add> weight_decay=training_args.weight_decay,
<add> mask=decay_mask_fn,
<add> )
<add>
<add> # Setup train state
<add> state = TrainState.create(apply_fn=model.__call__, params=model.params, tx=adamw, dropout_rng=dropout_rng)
<add>
<add> # label smoothed cross entropy
<add> def loss_fn(logits, labels, padding_mask, label_smoothing_factor=0.0):
<add> """
<add> The label smoothing implementation is adapted from Flax's official example:
<add> https://github.com/google/flax/blob/87a211135c6a377c8f29048a1cac3840e38b9da4/examples/wmt/train.py#L104
<add> """
<add> vocab_size = logits.shape[-1]
<add> confidence = 1.0 - label_smoothing_factor
<add> low_confidence = (1.0 - confidence) / (vocab_size - 1)
<add> normalizing_constant = -(
<add> confidence * jnp.log(confidence) + (vocab_size - 1) * low_confidence * jnp.log(low_confidence + 1e-20)
<add> )
<add> soft_labels = onehot(labels, vocab_size, on_value=confidence, off_value=low_confidence)
<add>
<add> loss = optax.softmax_cross_entropy(logits, soft_labels)
<add> loss = loss - normalizing_constant
<add>
<add> # ignore padded tokens from loss
<add> loss = loss * padding_mask
<add> loss = loss.sum() / padding_mask.sum()
<add> return loss
<add>
<add> # Define gradient update step fn
<add> def train_step(state, batch, label_smoothing_factor=0.0):
<add> dropout_rng, new_dropout_rng = jax.random.split(state.dropout_rng)
<add>
<add> def compute_loss(params):
<add> labels = batch.pop("labels")
<add> logits = state.apply_fn(**batch, params=params, dropout_rng=dropout_rng, train=True)[0]
<add> loss = loss_fn(logits, labels, batch["decoder_attention_mask"], label_smoothing_factor)
<add> return loss
<add>
<add> grad_fn = jax.value_and_grad(compute_loss)
<add> loss, grad = grad_fn(state.params)
<add> grad = jax.lax.pmean(grad, "batch")
<add>
<add> new_state = state.apply_gradients(grads=grad, dropout_rng=new_dropout_rng)
<add>
<add> metrics = {"loss": loss, "learning_rate": linear_decay_lr_schedule_fn(state.step)}
<add> metrics = jax.lax.pmean(metrics, axis_name="batch")
<add>
<add> return new_state, metrics
<add>
<add> # Define eval fn
<add> def eval_step(params, batch, label_smoothing_factor=0.0):
<add> labels = batch.pop("labels")
<add> logits = model(**batch, params=params, train=False)[0]
<add> loss = loss_fn(logits, labels, batch["decoder_attention_mask"], label_smoothing_factor)
<add>
<add> # summarize metrics
<add> metrics = {"loss": loss}
<add> metrics = jax.lax.pmean(metrics, axis_name="batch")
<add> return metrics
<add>
<add> # Define generation function
<add> max_length = (
<add> data_args.val_max_target_length if data_args.val_max_target_length is not None else model.config.max_length
<add> )
<add> num_beams = data_args.num_beams if data_args.num_beams is not None else model.config.num_beams
<add> gen_kwargs = {"max_length": max_length, "num_beams": num_beams}
<add>
<add> def generate_step(params, batch):
<add> model.params = params
<add> output_ids = model.generate(batch["pixel_values"], **gen_kwargs)
<add> return output_ids.sequences
<add>
<add> # Create parallel version of the train and eval step
<add> p_train_step = jax.pmap(
<add> partial(train_step, label_smoothing_factor=training_args.label_smoothing_factor), "batch", donate_argnums=(0,)
<add> )
<add> p_eval_step = jax.pmap(partial(eval_step, label_smoothing_factor=training_args.label_smoothing_factor), "batch")
<add> p_generate_step = jax.pmap(generate_step, "batch")
<add>
<add> # Replicate the train state on each device
<add> state = state.replicate()
<add>
<add> if training_args.do_train:
<add> logger.info("***** Running training *****")
<add> logger.info(f" Num train examples = {num_train_examples_per_epoch}")
<add> logger.info(f" Num Epochs = {num_epochs}")
<add> logger.info(f" Instantaneous train batch size per device = {training_args.per_device_train_batch_size}")
<add> logger.info(f" Total train batch size (w. parallel & distributed) = {train_batch_size}")
<add> logger.info(f" Optimization steps per epoch = {steps_per_epoch}")
<add> logger.info(f" Total optimization steps = {total_train_steps}")
<add> if training_args.do_eval:
<add> logger.info(f" Num evaluation examples = {num_eval_examples}")
<add> logger.info(f" Instantaneous evaluation batch size per device = {training_args.per_device_eval_batch_size}")
<add> logger.info(f" Total evaluation batch size (w. parallel & distributed) = {eval_batch_size}")
<add> logger.info(f" Evaluation steps = {eval_steps}")
<add> if training_args.do_predict:
<add> logger.info(f" Num test examples = {num_test_examples}")
<add> logger.info(f" Instantaneous test batch size per device = {training_args.per_device_eval_batch_size}")
<add> logger.info(f" Total test batch size (w. parallel & distributed) = {eval_batch_size}")
<add> logger.info(f" Test steps = {test_steps}")
<add>
<add> # create output directory
<add> if not os.path.isdir(os.path.join(training_args.output_dir)):
<add> os.makedirs(os.path.join(training_args.output_dir), exist_ok=True)
<add>
<add> def save_ckpt(ckpt_dir: str, commit_msg: str = ""):
<add> """save checkpoints and push to Hugging Face Hub if specified"""
<add>
<add> # save checkpoint after each epoch and push checkpoint to the hub
<add> if jax.process_index() == 0:
<add> params = jax.device_get(jax.tree_map(lambda x: x[0], state.params))
<add> model.save_pretrained(os.path.join(training_args.output_dir, ckpt_dir), params=params)
<add> tokenizer.save_pretrained(os.path.join(training_args.output_dir, ckpt_dir))
<add> if training_args.push_to_hub:
<add> repo.push_to_hub(commit_message=commit_msg, blocking=False)
<add>
<add> def evaluation_loop(
<add> rng: jax.random.PRNGKey,
<add> dataset: Dataset,
<add> metric_key_prefix: str = "eval",
<add> ckpt_dir: str = "",
<add> is_prediction=False,
<add> ):
<add>
<add> logger.info(f"*** {'Predict' if is_prediction else 'Evaluate'} ***")
<add>
<add> metrics = []
<add> preds = []
<add> labels = []
<add>
<add> batches = blockwise_data_loader(
<add> rng,
<add> dataset,
<add> block_size=training_args.block_size,
<add> batch_size=eval_batch_size,
<add> keep_in_memory=False,
<add> shuffle=False,
<add> split="prediction" if is_prediction else "validation",
<add> )
<add> steps = len(dataset) // eval_batch_size
<add> for _ in tqdm(
<add> range(steps), desc=f"{'Predicting' if is_prediction else 'Evaluating'}...", position=2, leave=False
<add> ):
<add> # Model forward
<add> batch = next(batches)
<add> _labels = batch.get("labels", None)
<add> if not is_prediction and _labels is None:
<add> raise ValueError("Evaluation requires the validation dataset to have `labels`")
<add>
<add> if _labels is not None:
<add> _metrics = p_eval_step(state.params, batch)
<add> metrics.append(_metrics)
<add>
<add> # generation
<add> if data_args.predict_with_generate:
<add> generated_ids = p_generate_step(state.params, batch)
<add> preds.extend(jax.device_get(generated_ids.reshape(-1, gen_kwargs["max_length"])))
<add> if _labels is not None:
<add> labels.extend(jax.device_get(_labels.reshape(-1, _labels.shape[-1])))
<add>
<add> if metrics:
<add> # normalize metrics
<add> metrics = get_metrics(metrics)
<add> metrics = jax.tree_map(jnp.mean, metrics)
<add>
<add> # compute ROUGE metrics
<add> generations = []
<add> rouge_desc = ""
<add> if data_args.predict_with_generate:
<add> if labels:
<add> rouge_metrics, decoded_preds, decoded_labels = compute_metrics(preds, labels)
<add> metrics.update(rouge_metrics)
<add> rouge_desc = " ".join(
<add> [
<add> f"{'Predict' if is_prediction else 'Eval'} {key}: {value} |"
<add> for key, value in rouge_metrics.items()
<add> ]
<add> )
<add> for pred, label in zip(decoded_preds, decoded_labels):
<add> pred = pred.replace("\n", " ")
<add> label = label.replace("\n", " ")
<add> generations.append({"label": label, "pred": pred})
<add> else:
<add> decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)
<add> # Some simple post-processing
<add> decoded_preds = [pred.strip() for pred in decoded_preds]
<add> # rougeLSum expects newline after each sentence
<add> decoded_preds = ["\n".join(nltk.sent_tokenize(pred)) for pred in decoded_preds]
<add> for pred in decoded_preds:
<add> pred = pred.replace("\n", " ")
<add> generations.append({"pred": pred})
<add>
<add> if metrics:
<add> # Print metrics and update progress bar
<add> desc = f"{'Predict' if is_prediction else 'Eval'} Loss: {metrics['loss']} | {rouge_desc})"
<add> if training_args.do_train and not is_prediction:
<add> desc = f"Epoch... ({epoch + 1}/{num_epochs} | Step: {cur_step} | " + desc
<add> epochs.write(desc)
<add> epochs.desc = desc
<add> logger.info(desc)
<add>
<add> if jax.process_index() == 0:
<add>
<add> if not os.path.isdir(os.path.join(training_args.output_dir, ckpt_dir)):
<add> os.makedirs(os.path.join(training_args.output_dir, ckpt_dir), exist_ok=True)
<add>
<add> if metrics:
<add>
<add> # Save metrics (only for the evaluation/prediction being done along with training)
<add> if has_tensorboard and training_args.do_train:
<add> write_metric(
<add> summary_writer, metrics, train_time=None, step=cur_step, metric_key_prefix=metric_key_prefix
<add> )
<add>
<add> # save final metrics in json
<add> metrics = {
<add> f"{metric_key_prefix}_{metric_name}": round(value.item(), 6)
<add> for metric_name, value in metrics.items()
<add> }
<add> _path = os.path.join(training_args.output_dir, ckpt_dir, f"{metric_key_prefix}_results.json")
<add> with open(_path, "w") as f:
<add> json.dump(metrics, f, indent=4, sort_keys=True)
<add>
<add> # Update report
<add> with open(os.path.join(training_args.output_dir, "log"), "a", encoding="UTF-8") as fp:
<add> fp.write(desc + "\n")
<add>
<add> # Save generations
<add> if generations:
<add> output_file = os.path.join(training_args.output_dir, ckpt_dir, f"{metric_key_prefix}_generation.json")
<add> with open(output_file, "w", encoding="UTF-8") as fp:
<add> json.dump(generations, fp, ensure_ascii=False, indent=4)
<add>
<add> def evaluate(rng: jax.random.PRNGKey, dataset: Dataset, ckpt_dir: str = ""):
<add> evaluation_loop(rng, dataset, metric_key_prefix="eval", ckpt_dir=ckpt_dir)
<add>
<add> def predict(rng: jax.random.PRNGKey, dataset: Dataset):
<add> evaluation_loop(rng, dataset, metric_key_prefix="test", is_prediction=True)
<add>
<add> input_rng = None
<add>
<add> if training_args.do_train:
<add>
<add> cur_step = 0
<add> train_time = 0
<add> epochs = tqdm(range(num_epochs), desc=f"Epoch ... (1/{num_epochs})", position=0)
<add>
<add> for epoch in epochs:
<add> # ======================== Training ================================
<add> # Create sampling rng
<add> rng, input_rng = jax.random.split(rng)
<add>
<add> train_metrics = []
<add> train_batches = blockwise_data_loader(
<add> input_rng,
<add> train_dataset,
<add> block_size=training_args.block_size,
<add> batch_size=train_batch_size,
<add> keep_in_memory=True,
<add> shuffle=True,
<add> split="train",
<add> )
<add>
<add> # train
<add> for (batch_idx, _) in enumerate(tqdm(range(steps_per_epoch), desc="Training...", position=1, leave=False)):
<add>
<add> cur_step += 1
<add> batch = next(train_batches)
<add> batch_start = time.time()
<add> state, train_metric = p_train_step(state, batch)
<add> train_metrics.append(train_metric)
<add> train_time += time.time() - batch_start
<add> time_per_step = train_time / cur_step
<add>
<add> # log and save info
<add> if training_args.logging_steps > 0 and cur_step % training_args.logging_steps == 0:
<add>
<add> _train_metric = unreplicate(train_metric)
<add> desc = f"Epoch... ({epoch + 1}/{num_epochs} | Step: {cur_step} | Loss: {_train_metric['loss']} | Learning Rate: {_train_metric['learning_rate']} | Time per step: {time_per_step})"
<add> epochs.desc = desc
<add> epochs.write(desc)
<add>
<add> logger.info(desc)
<add>
<add> with open(os.path.join(training_args.output_dir, "log"), "a", encoding="UTF-8") as fp:
<add> fp.write(desc + "\n")
<add>
<add> # Save metrics
<add> if has_tensorboard and jax.process_index() == 0:
<add> write_metric(
<add> summary_writer,
<add> train_metrics,
<add> train_time=train_time,
<add> step=cur_step,
<add> metric_key_prefix="train",
<add> )
<add>
<add> # ======================== Evaluating (inside an epoch) ==============================
<add>
<add> if (
<add> training_args.do_eval
<add> and (training_args.eval_steps is not None and training_args.eval_steps > 0)
<add> and cur_step % training_args.eval_steps == 0
<add> ):
<add> ckpt_dir = f"ckpt_epoch_{epoch + 1}_step_{cur_step}"
<add> commit_msg = f"Saving weights and logs of epoch {epoch + 1} - step {cur_step}"
<add> evaluate(input_rng, eval_dataset, ckpt_dir)
<add> save_ckpt(ckpt_dir=ckpt_dir, commit_msg=commit_msg)
<add>
<add> # ======================== Epoch End ==============================
<add>
<add> # log and save info
<add> if training_args.logging_steps <= 0:
<add>
<add> logger.info(desc)
<add>
<add> with open(os.path.join(training_args.output_dir, "log"), "a", encoding="UTF-8") as fp:
<add> fp.write(desc + "\n")
<add>
<add> # Save metrics
<add> if has_tensorboard and jax.process_index() == 0:
<add> write_metric(
<add> summary_writer, train_metrics, train_time=train_time, step=cur_step, metric_key_prefix="train"
<add> )
<add>
<add> # ======================== Evaluating (after each epoch) ==============================
<add>
<add> if training_args.do_eval and (training_args.eval_steps is None or training_args.eval_steps <= 0):
<add> ckpt_dir = f"ckpt_epoch_{epoch + 1}_step_{cur_step}"
<add> commit_msg = f"Saving weights and logs of epoch {epoch + 1} - step {cur_step}"
<add> evaluate(input_rng, eval_dataset, ckpt_dir)
<add> save_ckpt(ckpt_dir=ckpt_dir, commit_msg=commit_msg)
<add>
<add> # ======================== Evaluating | Predicting ==============================
<add>
<add> # Create sampling rng
<add> if input_rng is None:
<add> rng, input_rng = jax.random.split(rng)
<add>
<add> # run evaluation without training
<add> if training_args.do_eval and not training_args.do_train:
<add> evaluate(input_rng, eval_dataset)
<add>
<add> # run prediction after (or without) training
<add> if training_args.do_predict:
<add> predict(input_rng, predict_dataset)
<add>
<add>
<add>if __name__ == "__main__":
<add> main() | 3 |
Mixed | Java | add overscrollmode prop to scrollview | 12c486862896ca2391577410656dfc4580361fef | <ide><path>Libraries/Components/ScrollView/ScrollView.js
<ide> const ScrollView = React.createClass({
<ide> * @platform android
<ide> */
<ide> scrollPerfTag: PropTypes.string,
<add>
<add> /**
<add> * Used to override default value of overScroll mode.
<add> *
<add> * Possible values:
<add> *
<add> * - `'auto'` - Default value, allow a user to over-scroll
<add> * this view only if the content is large enough to meaningfully scroll.
<add> * - `'always'` - Always allow a user to over-scroll this view.
<add> * - `'never'` - Never allow a user to over-scroll this view.
<add> *
<add> * @platform android
<add> */
<add> overScrollMode: PropTypes.oneOf([
<add> 'auto',
<add> 'always',
<add> 'never',
<add> ]),
<ide> },
<ide>
<ide> mixins: [ScrollResponder.Mixin],
<ide><path>ReactAndroid/src/main/java/com/facebook/react/views/scroll/ReactHorizontalScrollViewManager.java
<ide> import javax.annotation.Nullable;
<ide>
<ide> import android.graphics.Color;
<add>import android.view.View;
<ide>
<ide> import com.facebook.react.bridge.ReadableArray;
<ide> import com.facebook.react.module.annotations.ReactModule;
<ide> public void setPagingEnabled(ReactHorizontalScrollView view, boolean pagingEnabl
<ide> view.setPagingEnabled(pagingEnabled);
<ide> }
<ide>
<add> /**
<add> * Controls overScroll behaviour
<add> */
<add> @ReactProp(name = "overScrollMode")
<add> public void setOverScrollMode(ReactHorizontalScrollView view, String value) {
<add> view.setOverScrollMode(ReactScrollViewHelper.parseOverScrollMode(value));
<add> }
<add>
<ide> @Override
<ide> public void receiveCommand(
<ide> ReactHorizontalScrollView scrollView,
<ide><path>ReactAndroid/src/main/java/com/facebook/react/views/scroll/ReactScrollViewHelper.java
<ide> import android.view.View;
<ide> import android.view.ViewGroup;
<ide>
<add>import com.facebook.react.bridge.JSApplicationIllegalArgumentException;
<ide> import com.facebook.react.bridge.ReactContext;
<ide> import com.facebook.react.uimanager.UIManagerModule;
<ide>
<ide> public class ReactScrollViewHelper {
<ide>
<ide> public static final long MOMENTUM_DELAY = 20;
<add> public static final String OVER_SCROLL_ALWAYS = "always";
<add> public static final String AUTO = "auto";
<add> public static final String OVER_SCROLL_NEVER = "never";
<ide>
<ide> /**
<ide> * Shared by {@link ReactScrollView} and {@link ReactHorizontalScrollView}.
<ide> private static void emitScrollEvent(ViewGroup scrollView, ScrollEventType scroll
<ide> scrollView.getWidth(),
<ide> scrollView.getHeight()));
<ide> }
<add>
<add> public static int parseOverScrollMode(String jsOverScrollMode) {
<add> if (jsOverScrollMode == null || jsOverScrollMode.equals(AUTO)) {
<add> return View.OVER_SCROLL_IF_CONTENT_SCROLLS;
<add> } else if (jsOverScrollMode.equals(OVER_SCROLL_ALWAYS)) {
<add> return View.OVER_SCROLL_ALWAYS;
<add> } else if (jsOverScrollMode.equals(OVER_SCROLL_NEVER)) {
<add> return View.OVER_SCROLL_NEVER;
<add> } else {
<add> throw new JSApplicationIllegalArgumentException("wrong overScrollMode: " + jsOverScrollMode);
<add> }
<add> }
<ide> }
<ide><path>ReactAndroid/src/main/java/com/facebook/react/views/scroll/ReactScrollViewManager.java
<ide> import java.util.Map;
<ide>
<ide> import android.graphics.Color;
<add>import android.view.View;
<ide>
<ide> import com.facebook.react.bridge.ReadableArray;
<ide> import com.facebook.react.common.MapBuilder;
<ide> public void setBottomFillColor(ReactScrollView view, int color) {
<ide> view.setEndFillColor(color);
<ide> }
<ide>
<add> /**
<add> * Controls overScroll behaviour
<add> */
<add> @ReactProp(name = "overScrollMode")
<add> public void setOverScrollMode(ReactScrollView view, String value) {
<add> view.setOverScrollMode(ReactScrollViewHelper.parseOverScrollMode(value));
<add> }
<add>
<ide> @Override
<ide> public @Nullable Map<String, Integer> getCommandsMap() {
<ide> return ReactScrollViewCommandHelper.getCommandsMap(); | 4 |
Javascript | Javascript | improve performance of eventemitter.emit | c5f5f84a33967862036c7d87f4bbde6a59d3820a | <ide><path>lib/events.js
<ide> 'use strict';
<ide>
<ide> const { Math, Object, Reflect } = primordials;
<add>const apply = Reflect.apply;
<ide>
<ide> var spliceOne;
<ide>
<ide> EventEmitter.prototype.emit = function emit(type, ...args) {
<ide> return false;
<ide>
<ide> if (typeof handler === 'function') {
<del> Reflect.apply(handler, this, args);
<add> apply(handler, this, args);
<ide> } else {
<ide> const len = handler.length;
<ide> const listeners = arrayClone(handler, len);
<ide> for (var i = 0; i < len; ++i)
<del> Reflect.apply(listeners[i], this, args);
<add> apply(listeners[i], this, args);
<ide> }
<ide>
<ide> return true; | 1 |
PHP | PHP | fix typo in doc block | 415724e94e696ce90755cf0374547982f26b9e07 | <ide><path>src/Illuminate/Notifications/ChannelManager.php
<ide> protected function shouldSendNotification($notifiable, $notification, $channel)
<ide> * Queue the given notification instances.
<ide> *
<ide> * @param mixed $notifiables
<del> * @param array[\Illuminate\Notifcations\Channels\Notification] $notification
<add> * @param array[\Illuminate\Notifications\Channels\Notification] $notification
<ide> * @return void
<ide> */
<ide> protected function queueNotification($notifiables, $notification) | 1 |
Text | Text | add documentation for initializations and datasets | adb0b1db4c61a90bdaedc2f5e287e9683d41157a | <ide><path>docs/sources/datasets.md
<add># Datasets
<add>
<add>## CIFAR10 small image classification
<add>
<add>`keras.datasets.cifar10`
<add>
<add>Dataset of 50,000 32x32 color images, labeled over 10 categories.
<add>
<add>### Usage:
<add>
<add>```python
<add>(X_train, y_train), (X_test, y_test) = cifar10.load_data(test_split=0.1, seed=113)
<add>```
<add>
<add>__Returns:__
<add>
<add>- X_train, X_test: uint8 array of RGB image data with shape (nb_samples, 3, 32, 32).
<add>- y_train, y_test: uint8 array of category labels (integers in range 0-9) with shape (nb_samples,).
<add>
<add>__Arguments:__
<add>
<add>- test_split: float. Fraction of the dataset to be used as test data.
<add>- seed: int. Seed for reproducible data shuffling.
<add>
<add>## IMDB Movie reviews sentiment classification
<add>
<add>`keras.datasets.imdb`
<add>
<add>Dataset of 25,000 movies reviews from IMDB, labeled by sentiment (positive/negative). Reviews have been preprocessed, and each review is encoded as a sequence of word indexes (integers). For convenience, words are indexed by overall frequency in the dataset, so that for instance the integer "3" encodes the 3rd most frequent word in the data. This allows for quick filtering operations such as: "only consider the top 10,000 most common words, but eliminate the top 20 most common words".
<add>
<add>As a convention, "0" does not stand for a specific word, but instead is used to encode any unknown word.
<add>
<add>### Usage:
<add>
<add>```python
<add>(X_train, y_train), (X_test, y_test) = imdb.load_data(path="imdb.pkl", \
<add>nb_words=None, skip_top=0, maxlen=None, test_split=0.1, seed=113)`
<add>```
<add>__Returns:__
<add>
<add>- X_train, X_test: list of sequences, which are lists of indexes (integers). If the nb_words argument was specific, the maximum possible index value is nb_words-1. If the maxlen argument was specified, the largest possible sequence length is maxlen.
<add>- y_train, y_test: list of integer labels (1 or 0).
<add>
<add>__Arguments:__
<add>
<add>- path: if you do have the data locally (at `'~/.keras/datasets/' + path`), if will be downloaded to this location (in cPickle format).
<add>- nb_words: integer or None. Top most frequent words to consider. Any less frequent word will appear as 0 in the sequence data.
<add>- skip_top: integer. Top most frequent words to ignore (they will appear as 0s in the sequence data).
<add>- maxlen: int. Maximum sequence length. Any longer sequence will be truncated.
<add>- test_split: float. Fraction of the dataset to be used as test data.
<add>- seed: int. Seed for reproducible data shuffling.
<add>
<add>## Reuters newswire topics classification
<add>
<add>`keras.datasets.reuters`
<add>
<add>Dataset of 11,228 newswires from Reuters, labeled over 46 topics. As with the IMDB dataset, each wire is encoded as a sequence of word indexes (same conventions).
<add>
<add>### Usage:
<add>
<add>```python
<add>(X_train, y_train), (X_test, y_test) = reuters.load_data(path="reuters.pkl", \
<add>nb_words=None, skip_top=0, maxlen=None, test_split=0.1, seed=113)`
<add>```
<add>
<add>The specifications are the same as that of the IMDB dataset.
<add>
<add>This dataset also makes available the word index used for encoding the sequences:
<add>
<add>```python
<add>word_index = reuters.get_word_index(path="reuters_word_index.pkl")
<add>```
<add>
<add>__Returns:__ A dictionary where key are words (str) and values are indexes (integer). eg. `word_index["giraffe"]` might return `1234`.
<add>
<add>__Arguments:__
<add>
<add>- path: if you do have the index file locally (at `'~/.keras/datasets/' + path`), if will be downloaded to this location (in cPickle format).
<ide><path>docs/sources/initializations.md
<add># Initializations
<add>
<add>## Usage of initializations
<add>
<add>Initializations define the probability distribution used to set the initial random weights of Keras layers.
<add>
<add>The keyword arguments used for passing initializations to layers will depend on the layer. Usually it is simply `init`:
<add>
<add>```python
<add>model.add(Dense(64, 64, init='uniform'))
<add>```
<add>
<add>## Available initializations
<add>
<add>- __uniform__
<add>- __normal__
<add>- __orthogonal__: use with square 2D layers (`shape[0] == shape[1]`).
<add>- __zero__
<ide>\ No newline at end of file | 2 |
Text | Text | change windowscontainers.md to readme.md | 213e49b8f1b80178da015bce2a9fdc3a0f541d9b | <ide><path>docs/contributing/software-req-win.md
<ide> https://git-scm.com/download/win.
<ide> ### 3. The machine must be configured to run containers
<ide>
<ide> For example, by following the quick start guidance at
<del>https://msdn.microsoft.com/en-us/virtualization/windowscontainers/quick_start/quick_start or https://github.com/docker/labs/blob/master/windows/windows-containers/WindowsContainers.md
<add>https://msdn.microsoft.com/en-us/virtualization/windowscontainers/quick_start/quick_start or https://github.com/docker/labs/blob/master/windows/windows-containers/README.md
<ide>
<ide> ### 4. If building in a Hyper-V VM
<ide>
<ide> To test and run the Windows Moby engine, you need a system that supports Windows
<ide> - Windows 10 Anniversary Edition
<ide> - Windows Server 2016 running in a VM, on bare metal or in the cloud
<ide>
<del>Check out the [getting started documentation](https://github.com/docker/labs/blob/master/windows/windows-containers/WindowsContainers.md) for details.
<add>Check out the [getting started documentation](https://github.com/docker/labs/blob/master/windows/windows-containers/README.md) for details.
<ide>
<ide> ### 2. GitHub account
<ide> | 1 |
PHP | PHP | fix multi-model validators with deep & atomic | f250592feede6dd71fdef27d2d2a35cd0abae8c2 | <ide><path>lib/Cake/Model/ModelValidator.php
<ide> public function validateAssociated(&$data, $options = array()) {
<ide> $data[$association] = $model->{$association}->data[$model->{$association}->alias];
<ide> }
<ide> if (is_array($validates)) {
<del> if (in_array(false, $validates, true)) {
<add> if (in_array(false, Hash::flatten($validates), true)) {
<ide> $validates = false;
<ide> } else {
<ide> $validates = true;
<ide> public function validateMany(&$data, $options = array()) {
<ide> $validates = $model->set($record) && $model->validates($options);
<ide> $data[$key] = $model->data;
<ide> }
<del> if ($validates === false || (is_array($validates) && in_array(false, $validates, true))) {
<add> if ($validates === false || (is_array($validates) && in_array(false, Hash::flatten($validates), true))) {
<ide> $validationErrors[$key] = $model->validationErrors;
<ide> $validates = false;
<ide> } else {
<ide><path>lib/Cake/Test/Case/Model/ModelValidationTest.php
<ide> public function testCustomMethodWithEmptyValue() {
<ide> $this->assertFalse($model->validates());
<ide> }
<ide>
<add>/**
<add> * Test validateAssociated with atomic=false & deep=true
<add> *
<add> * @return void
<add> */
<add> public function testValidateAssociatedAtomicFalseDeepTrueWithErrors() {
<add> $this->loadFixtures('Comment', 'Article', 'User', 'Attachment');
<add> $Attachment = ClassRegistry::init('Attachment');
<add> $Attachment->Comment->validator()->add('comment', array(
<add> array('rule' => 'notEmpty')
<add> ));
<add> $Attachment->Comment->User->bindModel(array(
<add> 'hasMany' => array(
<add> 'Article',
<add> 'Comment'
<add> )),
<add> false
<add> );
<add>
<add> $data = array(
<add> 'Attachment' => array(
<add> 'attachment' => 'text',
<add> 'Comment' => array(
<add> 'comment' => '',
<add> 'published' => 'N',
<add> 'User' => array(
<add> 'user' => 'Foo',
<add> 'password' => 'mypassword',
<add> 'Comment' => array(
<add> array(
<add> 'comment' => ''
<add> )
<add> )
<add> )
<add> )
<add> )
<add> );
<add> $result = $Attachment->validateAssociated($data, array('atomic' => false, 'deep' => true));
<add>
<add> $result = $Attachment->validationErrors;
<add> $expected = array(
<add> 'Comment' => array(
<add> 'comment' => array(
<add> 0 => 'This field cannot be left blank',
<add> ),
<add> 'User' => array(
<add> 'Comment' => array(
<add> 0 => array(
<add> 'comment' => array(
<add> 0 => 'This field cannot be left blank',
<add> ),
<add> ),
<add> ),
<add> ),
<add> ),
<add> );
<add> $this->assertEquals($result, $expected);
<add> }
<add>
<add>/**
<add> * Test validateMany with atomic=false & deep=true
<add> *
<add> * @return void
<add> */
<add> public function testValidateManyAtomicFalseDeepTrueWithErrors() {
<add> $this->loadFixtures('Comment', 'Article', 'User');
<add> $Article = ClassRegistry::init('Article');
<add> $Article->Comment->validator()->add('comment', array(
<add> array('rule' => 'notEmpty')
<add> ));
<add>
<add> $data = array(
<add> array(
<add> 'Article' => array(
<add> 'user_id' => 1,
<add> 'title' => 'Foo',
<add> 'body' => 'text',
<add> 'published' => 'N'
<add> ),
<add> 'Comment' => array(
<add> array(
<add> 'user_id' => 1,
<add> 'comment' => 'Baz',
<add> 'published' => 'N',
<add> )
<add> ),
<add> ),
<add> array(
<add> 'Article' => array(
<add> 'user_id' => 1,
<add> 'title' => 'Bar',
<add> 'body' => 'text',
<add> 'published' => 'N'
<add> ),
<add> 'Comment' => array(
<add> array(
<add> 'user_id' => 1,
<add> 'comment' => '',
<add> 'published' => 'N',
<add> )
<add> ),
<add> ),
<add> );
<add> $Article->validateMany($data, array('atomic' => false, 'deep' => true));
<add>
<add> $result = $Article->validationErrors;
<add> $expected = array(
<add> 1 => array(
<add> 'Comment' => array(
<add> 0 => array(
<add> 'comment' => array(
<add> 0 => 'This field cannot be left blank',
<add> ),
<add> ),
<add> ),
<add> ),
<add> );
<add> $this->assertEquals($result, $expected);
<add> }
<add>
<ide> } | 2 |
PHP | PHP | add minor changes to http client | 39f1da2756de9e5b6cf6e486d0000cf53ac3357a | <ide><path>src/Illuminate/Http/Client/Factory.php
<ide> namespace Illuminate\Http\Client;
<ide>
<ide> use Closure;
<add>use GuzzleHttp\Psr7\Response as Psr7Response;
<ide> use Illuminate\Support\Str;
<ide> use Illuminate\Support\Traits\Macroable;
<ide> use PHPUnit\Framework\Assert as PHPUnit;
<add>use function GuzzleHttp\Promise\promise_for;
<ide>
<ide> class Factory
<ide> {
<ide> class Factory
<ide> /**
<ide> * The stub callables that will handle requests.
<ide> *
<del> * @var \Illuminate\Support\Collection|null
<add> * @var \Illuminate\Support\Collection
<ide> */
<ide> protected $stubCallbacks;
<ide>
<ide> class Factory
<ide> */
<ide> protected $responseSequences = [];
<ide>
<add> /**
<add> * The record array.
<add> *
<add> * @var array
<add> */
<add> protected $recorded;
<add>
<ide> /**
<ide> * Create a new factory instance.
<ide> *
<ide> public static function response($body = null, $status = 200, $headers = [])
<ide> $headers['Content-Type'] = 'application/json';
<ide> }
<ide>
<del> return \GuzzleHttp\Promise\promise_for(new \GuzzleHttp\Psr7\Response($status, $headers, $body));
<add> return promise_for(new Psr7Response($status, $headers, $body));
<ide> }
<ide>
<ide> /**
<ide> public function fake($callback = null)
<ide> $this->stubUrl($url, $callable);
<ide> }
<ide>
<del> return;
<add> return $this;
<ide> }
<ide>
<ide> $this->stubCallbacks = $this->stubCallbacks->merge(collect([
<ide><path>src/Illuminate/Http/Client/Request.php
<ide> public function __construct($request)
<ide> /**
<ide> * Get the request method.
<ide> *
<del> * @return strign
<add> * @return string
<ide> */
<ide> public function method()
<ide> {
<ide> public function hasHeader($key, $value = null)
<ide> /**
<ide> * Get the values for the header with the given name.
<ide> *
<add> * @param string $key
<ide> * @return array
<ide> */
<ide> public function header($key)
<ide><path>src/Illuminate/Http/Client/Response.php
<ide> public function json()
<ide> /**
<ide> * Get a header from the response.
<ide> *
<add> * @param string $header
<ide> * @return string
<ide> */
<ide> public function header(string $header)
<ide> public function redirect()
<ide> }
<ide>
<ide> /**
<del> * Detemine if the response indicates a client error occurred.
<add> * Determine if the response indicates a client error occurred.
<ide> *
<ide> * @return bool
<ide> */
<ide> public function clientError()
<ide> }
<ide>
<ide> /**
<del> * Detemine if the response indicates a server error occurred.
<add> * Determine if the response indicates a server error occurred.
<ide> *
<ide> * @return bool
<ide> */
<ide> public function toPsrResponse()
<ide> * Throw an exception if a server or client error occurred.
<ide> *
<ide> * @return $this
<add> *
<add> * @throws \Illuminate\Http\Client\RequestException
<ide> */
<ide> public function throw()
<ide> {
<ide><path>src/Illuminate/Http/Client/ResponseSequence.php
<ide> class ResponseSequence
<ide> protected $failWhenEmpty = true;
<ide>
<ide> /**
<del> * The repsonse that should be returned when the sequence is empty.
<add> * The response that should be returned when the sequence is empty.
<ide> *
<ide> * @var \GuzzleHttp\Promise\PromiseInterface
<ide> */
<ide> public function __construct(array $responses)
<ide> */
<ide> public function push($body = '', int $status = 200, array $headers = [])
<ide> {
<del> if (is_array($body)) {
<del> return $this->pushResponse(
<del> Factory::response(json_encode($body), $status, $headers)
<del> );
<del> }
<add> $body = is_array($body) ? json_encode($body) : $body;
<ide>
<ide> return $this->pushResponse(
<ide> Factory::response($body, $status, $headers) | 4 |
Javascript | Javascript | fix more recent jscs failures | f3aeaf7727eda467b30a5f338824db2fd7baf42a | <ide><path>packages/ember-views/lib/views/view.js
<ide> var EMPTY_ARRAY = [];
<ide> @namespace Ember
<ide> @extends Ember.CoreView
<ide> */
<add>// jscs:disable validateIndentation
<ide> var View = CoreView.extend(
<ide> ViewStreamSupport,
<ide> ViewKeywordSupport,
<ide> var View = CoreView.extend(
<ide> return scheduledFn;
<ide> }
<ide> });
<add>// jscs:enable validateIndentation
<ide>
<ide> deprecateProperty(View.prototype, 'state', '_state');
<ide> deprecateProperty(View.prototype, 'states', '_states');
<ide> deprecateProperty(View.prototype, 'states', '_states');
<ide> on a destroyed view.
<ide> */
<ide>
<del> // in the destroyed state, everything is illegal
<add>// in the destroyed state, everything is illegal
<ide>
<del> // before rendering has begun, all legal manipulations are noops.
<add>// before rendering has begun, all legal manipulations are noops.
<ide>
<del> // inside the buffer, legal manipulations are done on the buffer
<add>// inside the buffer, legal manipulations are done on the buffer
<ide>
<del> // once the view has been inserted into the DOM, legal manipulations
<del> // are done on the DOM element.
<add>// once the view has been inserted into the DOM, legal manipulations
<add>// are done on the DOM element.
<ide>
<ide> var mutation = EmberObject.extend(Evented).create();
<ide> // TODO MOVE TO RENDERER HOOKS
<ide><path>packages/ember-views/tests/views/collection_test.js
<ide> QUnit.test("should render the emptyView if content array is empty and emptyView
<ide> Ember.lookup = {
<ide> App: {
<ide> EmptyView: View.extend({
<del> tagName: 'kbd',
<del> render(buf) {
<del> buf.push("THIS IS AN EMPTY VIEW");
<del> }
<add> tagName: 'kbd',
<add> render(buf) {
<add> buf.push("THIS IS AN EMPTY VIEW");
<add> }
<ide> })
<ide> }
<ide> }; | 2 |
Text | Text | add import rctbridge.h for event sending example | 8df46c329ba41595fbaa09d13efeb0e4eb439fe9 | <ide><path>docs/NativeModulesIOS.md
<ide> Note that the constants are exported only at initialization time, so if you chan
<ide> The native module can signal events to JavaScript without being invoked directly. The easiest way to do this is to use `eventDispatcher`:
<ide>
<ide> ```objective-c
<add>#import "RCTBridge.h"
<add>
<ide> - (void)calendarEventReminderReceived:(NSNotification *)notification
<ide> {
<ide> NSString *eventName = notification.userInfo[@"name"]; | 1 |
Text | Text | add suggestion for openssl only sec releases | 1e5bafb9c8ba06835928064c55e274b592306eef | <ide><path>doc/contributing/security-release-process.md
<ide> The current security stewards are documented in the main Node.js
<ide> (Re-PR the pre-approved branch from nodejs-private/nodejs.org-private to
<ide> nodejs/nodejs.org)
<ide>
<add> If the security release will only contain an OpenSSL update consider
<add> adding the following to the pre-release announcement:
<add>
<add> ```text
<add> Since this security release will only include updates for OpenSSL, if you're using
<add> a Node.js version which is part of a distribution which uses a system
<add> installed OpenSSL, this Node.js security update might not concern you. You may
<add> instead need to update your system OpenSSL libraries, please check the
<add> security announcements for the distribution.
<add> ```
<add>
<ide> * [ ] Pre-release announcement [email][]: _**LINK TO EMAIL**_
<ide> * Subject: `Node.js security updates for all active release lines, Month Year`
<ide> * Body: | 1 |
Javascript | Javascript | register logbox by default in dev | 9b1845c3a09266fa4f28c4ec2c1eecb846895609 | <ide><path>Libraries/ReactNative/AppRegistry.js
<ide> const AppRegistry = {
<ide> appParameters.fabric,
<ide> showArchitectureIndicator,
<ide> scopedPerformanceLogger,
<add> appKey === 'LogBox',
<ide> );
<ide> },
<ide> };
<ide> const AppRegistry = {
<ide>
<ide> BatchedBridge.registerCallableModule('AppRegistry', AppRegistry);
<ide>
<add>if (__DEV__) {
<add> AppRegistry.registerComponent('LogBox', () => () => null);
<add>}
<add>
<ide> module.exports = AppRegistry;
<ide><path>Libraries/ReactNative/renderApplication.js
<ide> function renderApplication<Props: Object>(
<ide> fabric?: boolean,
<ide> showArchitectureIndicator?: boolean,
<ide> scopedPerformanceLogger?: IPerformanceLogger,
<add> isLogBox?: boolean,
<ide> ) {
<ide> invariant(rootTag, 'Expect to have a valid rootTag, instead got ', rootTag);
<ide>
<ide> function renderApplication<Props: Object>(
<ide> rootTag={rootTag}
<ide> fabric={fabric}
<ide> showArchitectureIndicator={showArchitectureIndicator}
<del> WrapperComponent={WrapperComponent}>
<add> WrapperComponent={WrapperComponent}
<add> internal_excludeLogBox={isLogBox}>
<ide> <RootComponent {...initialProps} rootTag={rootTag} />
<ide> </AppContainer>
<ide> </PerformanceLoggerContext.Provider> | 2 |
PHP | PHP | reverse a breaking addition in router | ab4bad7d2caf5535e6d3e65ce5937e4495853528 | <ide><path>src/Illuminate/Routing/Route.php
<ide> protected function extractOptionalParameters()
<ide> public function middleware($middleware = null)
<ide> {
<ide> if (is_null($middleware)) {
<del> $middlewares = (array) Arr::get($this->action, 'middleware', []);
<del>
<del> if (is_string($this->action['uses'])) {
<del> $middlewares = array_merge(
<del> $middlewares, $this->controllerMiddleware()
<del> );
<del> }
<del>
<del> return $middlewares;
<add> return (array) Arr::get($this->action, 'middleware', []);
<ide> }
<ide>
<ide> if (is_string($middleware)) { | 1 |
Ruby | Ruby | change the operator | e486c8710ba32a428d1349b5bc46fdef96254aea | <ide><path>Library/Homebrew/extend/os/mac/search.rb
<ide> def search_casks(string_or_regex)
<ide> .search(string_or_regex)
<ide>
<ide> cask_names = Cask::Cask.all.map(&:full_name)
<del> results |= DidYouMean::SpellChecker.new(dictionary: cask_names)
<add> results += DidYouMean::SpellChecker.new(dictionary: cask_names)
<ide> .correct(string_or_regex)
<ide>
<ide> results.sort.map do |name| | 1 |
Text | Text | add links to contributor github profiles | 3dc1e92bf24d30885e695f99846f9ff9706004eb | <ide><path>CONTRIBUTORS.md
<ide>
<ide> This is a list of everyone who has made significant contributions to spaCy, in alphabetical order. Thanks a lot for the great work!
<ide>
<del>* Adam Bittlingmayer, @bittlingmayer
<del>* Andreas Grivas, @andreasgrv
<del>* Chris DuBois, @chrisdubois
<del>* Christoph Schwienheer, @chssch
<del>* Henning Peters, @henningpeters
<del>* Ines Montani, @ines
<del>* J Nicolas Schrading, @NSchrading
<del>* Jordan Suchow, @suchow
<del>* Kendrick Tan, @kendricktan
<del>* Liling Tan, @alvations
<del>* Matthew Honnibal, @honnibal
<del>* Maxim Samsonov, @maxirmx
<del>* Oleg Zd, @olegzd
<del>* Sam Bozek, @sambozek
<del>* Vsevolod Solovyov, @vsolovyov
<del>* Wah Loon Keng, @kengz
<del>* Wolfgang Seeker, @wbwseeker
<del>* Yubing Dong, @tomtung
<add>* Adam Bittlingmayer, [@bittlingmayer](https://github.com/bittlingmayer)
<add>* Andreas Grivas, [@andreasgrv](https://github.com/andreasgrv)
<add>* Chris DuBois, [@chrisdubois](https://github.com/chrisdubois)
<add>* Christoph Schwienheer, [@chssch](https://github.com/chssch)
<add>* Henning Peters, [@henningpeters](https://github.com/henningpeters)
<add>* Ines Montani, [@ines](https://github.com/ines)
<add>* J Nicolas Schrading, [@NSchrading](https://github.com/NSchrading)
<add>* Jordan Suchow, [@suchow](https://github.com/suchow)
<add>* Kendrick Tan, [@kendricktan](https://github.com/kendricktan)
<add>* Liling Tan, [@alvations](https://github.com/alvations)
<add>* Matthew Honnibal, [@honnibal](https://github.com/honnibal)
<add>* Maxim Samsonov, [@maxirmx](https://github.com/maxirmx)
<add>* Oleg Zd, [@olegzd](https://github.com/olegzd)
<add>* Sam Bozek, [@sambozek](https://github.com/sambozek)
<add>* Vsevolod Solovyov, [@vsolovyov](https://github.com/vsolovyov)
<add>* Wah Loon Keng, [@kengz](https://github.com/kengz)
<add>* Wolfgang Seeker, [@wbwseeker](https://github.com/wbwseeker)
<add>* Yubing Dong, [@tomtung](https://github.com/tomtung) | 1 |
Javascript | Javascript | use simplified validator | f037d29abe1ebdfcb8b57528c1fa39d62197e8e6 | <ide><path>lib/dgram.js
<ide> Socket.prototype.bind = function(port_, address_ /* , callback */) {
<ide> };
<ide>
<ide> Socket.prototype.connect = function(port, address, callback) {
<del> port = validatePort(port, 'Port', { allowZero: false });
<add> port = validatePort(port, 'Port', false);
<ide> if (typeof address === 'function') {
<ide> callback = address;
<ide> address = '';
<ide> Socket.prototype.send = function(buffer,
<ide> }
<ide>
<ide> if (!connected)
<del> port = validatePort(port, 'Port', { allowZero: false });
<add> port = validatePort(port, 'Port', false);
<ide>
<ide> // Normalize callback so it's either a function or undefined but not anything
<ide> // else. | 1 |
Ruby | Ruby | remove duplicated tests | 50318f5d7e0e829b835115190a495ffade4fc8ab | <ide><path>actionpack/test/controller/integration_test.rb
<ide> def test_request_via_redirect_returns_status
<ide> assert_equal 200, @session.request_via_redirect(:get, path, params: args, headers: headers)
<ide> end
<ide>
<del> def test_get_via_redirect
<del> path = "/somepath"; args = {:id => '1'}; headers = {"X-Test-Header" => "testvalue" }
<del> @session.expects(:request_via_redirect).with(:get, path, params: args, headers: headers)
<del> @session.get_via_redirect(path, params: args, headers: headers)
<del> end
<del>
<ide> def test_deprecated_get_via_redirect
<ide> path = "/somepath"; args = { id: '1' }; headers = { "X-Test-Header" => "testvalue" }
<ide> @session.expects(:request_via_redirect).with(:get, path, args, headers)
<ide> def test_deprecated_get_via_redirect
<ide> end
<ide> end
<ide>
<del> def test_post_via_redirect
<del> path = "/somepath"; args = {:id => '1'}; headers = {"X-Test-Header" => "testvalue" }
<del> @session.expects(:request_via_redirect).with(:post, path, params: args, headers: headers)
<del> @session.post_via_redirect(path, params: args, headers: headers)
<del> end
<del>
<ide> def test_deprecated_post_via_redirect
<ide> path = "/somepath"; args = { id: '1' }; headers = { "X-Test-Header" => "testvalue" }
<ide> @session.expects(:request_via_redirect).with(:post, path, args, headers)
<ide> def test_deprecated_post_via_redirect
<ide> end
<ide> end
<ide>
<del> def test_patch_via_redirect
<del> path = "/somepath"; args = {:id => '1'}; headers = {"X-Test-Header" => "testvalue" }
<del> @session.expects(:request_via_redirect).with(:patch, path, params: args, headers: headers)
<del> @session.patch_via_redirect(path, params: args, headers: headers)
<del> end
<del>
<ide> def test_deprecated_patch_via_redirect
<ide> path = "/somepath"; args = { id: '1' }; headers = { "X-Test-Header" => "testvalue" }
<ide> @session.expects(:request_via_redirect).with(:patch, path, args, headers)
<ide> def test_deprecated_patch_via_redirect
<ide> end
<ide> end
<ide>
<del> def test_put_via_redirect
<del> path = "/somepath"; args = {:id => '1'}; headers = {"X-Test-Header" => "testvalue" }
<del> @session.expects(:request_via_redirect).with(:put, path, params: args, headers: headers)
<del> @session.put_via_redirect(path, params: args, headers: headers)
<del> end
<del>
<ide> def test_deprecated_put_via_redirect
<ide> path = "/somepath"; args = { id: '1' }; headers = { "X-Test-Header" => "testvalue" }
<ide> @session.expects(:request_via_redirect).with(:put, path, args, headers)
<ide> def test_deprecated_put_via_redirect
<ide> end
<ide> end
<ide>
<del> def test_delete_via_redirect
<del> path = "/somepath"; args = {:id => '1'}; headers = {"X-Test-Header" => "testvalue" }
<del> @session.expects(:request_via_redirect).with(:delete, path, params: args, headers: headers)
<del> @session.delete_via_redirect(path, params: args, headers: headers)
<del> end
<del>
<ide> def test_deprecated_delete_via_redirect
<ide> path = "/somepath"; args = { id: '1' }; headers = { "X-Test-Header" => "testvalue" }
<ide> @session.expects(:request_via_redirect).with(:delete, path, args, headers) | 1 |
PHP | PHP | set component alias name | c2cdb44f4819aac349452e33d469548bab4d7566 | <ide><path>src/Illuminate/View/Compilers/ComponentTagCompiler.php
<ide> protected function componentString(string $component, array $attributes)
<ide> }
<ide>
<ide> return " @component('{$class}', [".$this->attributesToString($parameters, $escapeBound = false).'])
<add><?php $component->withName(\''.$component.'\'); ?>
<ide> <?php $component->withAttributes(['.$this->attributesToString($attributes->all()).']); ?>';
<ide> }
<ide>
<ide><path>src/Illuminate/View/Component.php
<ide> abstract class Component
<ide> */
<ide> protected $except = [];
<ide>
<add> /**
<add> * The component alias name.
<add> *
<add> * @var string
<add> */
<add> public $componentName;
<add>
<ide> /**
<ide> * The component attributes.
<ide> *
<ide> protected function ignoredMethods()
<ide> ], $this->except);
<ide> }
<ide>
<add> /**
<add> * Set the component alias name.
<add> *
<add> * @param string $name
<add> * @return $this
<add> */
<add> public function withName($name)
<add> {
<add> $this->componentName = $name;
<add>
<add> return $this;
<add> }
<add>
<ide> /**
<ide> * Set the extra attributes that the component should make available.
<ide> *
<ide><path>tests/View/Blade/BladeComponentTagCompilerTest.php
<ide> public function testBasicComponentParsing()
<ide> $result = (new ComponentTagCompiler(['alert' => TestAlertComponent::class]))->compileTags('<div><x-alert type="foo" limit="5" @click="foo" required /><x-alert /></div>');
<ide>
<ide> $this->assertSame("<div> @component('Illuminate\Tests\View\Blade\TestAlertComponent', [])
<add><?php \$component->withName('alert'); ?>
<ide> <?php \$component->withAttributes(['type' => 'foo','limit' => '5','@click' => 'foo','required' => true]); ?>\n".
<ide> "@endcomponentClass @component('Illuminate\Tests\View\Blade\TestAlertComponent', [])
<add><?php \$component->withName('alert'); ?>
<ide> <?php \$component->withAttributes([]); ?>\n".
<ide> '@endcomponentClass </div>', trim($result));
<ide> }
<ide> public function testBasicComponentWithEmptyAttributesParsing()
<ide> $result = (new ComponentTagCompiler(['alert' => TestAlertComponent::class]))->compileTags('<div><x-alert type="" limit=\'\' @click="" required /></div>');
<ide>
<ide> $this->assertSame("<div> @component('Illuminate\Tests\View\Blade\TestAlertComponent', [])
<add><?php \$component->withName('alert'); ?>
<ide> <?php \$component->withAttributes(['type' => '','limit' => '','@click' => '','required' => true]); ?>\n".
<ide> '@endcomponentClass </div>', trim($result));
<ide> }
<ide> public function testDataCamelCasing()
<ide> $result = (new ComponentTagCompiler(['profile' => TestProfileComponent::class]))->compileTags('<x-profile user-id="1"></x-profile>');
<ide>
<ide> $this->assertSame("@component('Illuminate\Tests\View\Blade\TestProfileComponent', ['userId' => '1'])
<add><?php \$component->withName('profile'); ?>
<ide> <?php \$component->withAttributes([]); ?> @endcomponentClass", trim($result));
<ide> }
<ide>
<ide> public function testColonData()
<ide> $result = (new ComponentTagCompiler(['profile' => TestProfileComponent::class]))->compileTags('<x-profile :user-id="1"></x-profile>');
<ide>
<ide> $this->assertSame("@component('Illuminate\Tests\View\Blade\TestProfileComponent', ['userId' => 1])
<add><?php \$component->withName('profile'); ?>
<ide> <?php \$component->withAttributes([]); ?> @endcomponentClass", trim($result));
<ide> }
<ide>
<ide> public function testColonAttributesIsEscapedIfStrings()
<ide> $result = (new ComponentTagCompiler(['profile' => TestProfileComponent::class]))->compileTags('<x-profile :src="\'foo\'"></x-profile>');
<ide>
<ide> $this->assertSame("@component('Illuminate\Tests\View\Blade\TestProfileComponent', [])
<add><?php \$component->withName('profile'); ?>
<ide> <?php \$component->withAttributes(['src' => \Illuminate\View\Compilers\BladeCompiler::sanitizeComponentAttribute('foo')]); ?> @endcomponentClass", trim($result));
<ide> }
<ide>
<ide> public function testColonNestedComponentParsing()
<ide> $result = (new ComponentTagCompiler(['foo:alert' => TestAlertComponent::class]))->compileTags('<x-foo:alert></x-foo:alert>');
<ide>
<ide> $this->assertSame("@component('Illuminate\Tests\View\Blade\TestAlertComponent', [])
<add><?php \$component->withName('foo:alert'); ?>
<ide> <?php \$component->withAttributes([]); ?> @endcomponentClass", trim($result));
<ide> }
<ide>
<ide> public function testColonStartingNestedComponentParsing()
<ide> $result = (new ComponentTagCompiler(['foo:alert' => TestAlertComponent::class]))->compileTags('<x:foo:alert></x-foo:alert>');
<ide>
<ide> $this->assertSame("@component('Illuminate\Tests\View\Blade\TestAlertComponent', [])
<add><?php \$component->withName('foo:alert'); ?>
<ide> <?php \$component->withAttributes([]); ?> @endcomponentClass", trim($result));
<ide> }
<ide>
<ide> public function testSelfClosingComponentsCanBeCompiled()
<ide> $result = (new ComponentTagCompiler(['alert' => TestAlertComponent::class]))->compileTags('<div><x-alert/></div>');
<ide>
<ide> $this->assertSame("<div> @component('Illuminate\Tests\View\Blade\TestAlertComponent', [])
<add><?php \$component->withName('alert'); ?>
<ide> <?php \$component->withAttributes([]); ?>\n".
<ide> '@endcomponentClass </div>', trim($result));
<ide> }
<ide> public function testComponentsCanBeCompiledWithHyphenAttributes()
<ide> $result = (new ComponentTagCompiler(['alert' => TestAlertComponent::class]))->compileTags('<x-alert class="bar" wire:model="foo" x-on:click="bar" @click="baz" />');
<ide>
<ide> $this->assertSame("@component('Illuminate\Tests\View\Blade\TestAlertComponent', [])
<add><?php \$component->withName('alert'); ?>
<ide> <?php \$component->withAttributes(['class' => 'bar','wire:model' => 'foo','x-on:click' => 'bar','@click' => 'baz']); ?>\n".
<ide> '@endcomponentClass', trim($result));
<ide> }
<ide> public function testSelfClosingComponentsCanBeCompiledWithDataAndAttributes()
<ide> $result = (new ComponentTagCompiler(['alert' => TestAlertComponent::class]))->compileTags('<x-alert title="foo" class="bar" wire:model="foo" />');
<ide>
<ide> $this->assertSame("@component('Illuminate\Tests\View\Blade\TestAlertComponent', ['title' => 'foo'])
<add><?php \$component->withName('alert'); ?>
<ide> <?php \$component->withAttributes(['class' => 'bar','wire:model' => 'foo']); ?>\n".
<ide> '@endcomponentClass', trim($result));
<ide> }
<ide> public function testComponentsCanHaveAttachedWord()
<ide> $result = (new ComponentTagCompiler(['profile' => TestProfileComponent::class]))->compileTags('<x-profile></x-profile>Words');
<ide>
<ide> $this->assertSame("@component('Illuminate\Tests\View\Blade\TestProfileComponent', [])
<add><?php \$component->withName('profile'); ?>
<ide> <?php \$component->withAttributes([]); ?> @endcomponentClass Words", trim($result));
<ide> }
<ide>
<ide> public function testSelfClosingComponentsCanHaveAttachedWord()
<ide> $result = (new ComponentTagCompiler(['alert' => TestAlertComponent::class]))->compileTags('<x-alert/>Words');
<ide>
<ide> $this->assertSame("@component('Illuminate\Tests\View\Blade\TestAlertComponent', [])
<add><?php \$component->withName('alert'); ?>
<ide> <?php \$component->withAttributes([]); ?>\n".
<ide> '@endcomponentClass Words', trim($result));
<ide> }
<ide> public function testSelfClosingComponentsCanBeCompiledWithBoundData()
<ide> $result = (new ComponentTagCompiler(['alert' => TestAlertComponent::class]))->compileTags('<x-alert :title="$title" class="bar" />');
<ide>
<ide> $this->assertSame("@component('Illuminate\Tests\View\Blade\TestAlertComponent', ['title' => \$title])
<add><?php \$component->withName('alert'); ?>
<ide> <?php \$component->withAttributes(['class' => 'bar']); ?>\n".
<ide> '@endcomponentClass', trim($result));
<ide> }
<ide> public function testPairedComponentTags()
<ide> </x-alert>');
<ide>
<ide> $this->assertSame("@component('Illuminate\Tests\View\Blade\TestAlertComponent', [])
<add><?php \$component->withName('alert'); ?>
<ide> <?php \$component->withAttributes([]); ?>
<ide> @endcomponentClass", trim($result));
<ide> }
<ide> public function testClasslessComponents()
<ide> $result = (new ComponentTagCompiler([]))->compileTags('<x-anonymous-component :name="\'Taylor\'" :age="31" wire:model="foo" />');
<ide>
<ide> $this->assertSame("@component('Illuminate\View\AnonymousComponent', ['view' => 'components.anonymous-component','data' => ['name' => 'Taylor','age' => 31,'wire:model' => 'foo']])
<add><?php \$component->withName('anonymous-component'); ?>
<ide> <?php \$component->withAttributes(['name' => \Illuminate\View\Compilers\BladeCompiler::sanitizeComponentAttribute('Taylor'),'age' => 31,'wire:model' => 'foo']); ?>\n".
<ide> '@endcomponentClass', trim($result));
<ide> } | 3 |
Javascript | Javascript | relax expectations in test-icu-transcode | 793c63073a187e1271797f4481d826e26ed63619 | <ide><path>test/parallel/test-icu-transcode.js
<ide> if (!common.hasIntl)
<ide>
<ide> const buffer = require('buffer');
<ide> const assert = require('assert');
<del>const orig = Buffer.from('tést €', 'utf8');
<add>const orig = Buffer.from('těst ☕', 'utf8');
<ide>
<ide> // Test Transcoding
<ide> const tests = {
<del> 'latin1': [0x74, 0xe9, 0x73, 0x74, 0x20, 0x3f],
<add> 'latin1': [0x74, 0x3f, 0x73, 0x74, 0x20, 0x3f],
<ide> 'ascii': [0x74, 0x3f, 0x73, 0x74, 0x20, 0x3f],
<del> 'ucs2': [0x74, 0x00, 0xe9, 0x00, 0x73,
<add> 'ucs2': [0x74, 0x00, 0x1b, 0x01, 0x73,
<ide> 0x00, 0x74, 0x00, 0x20, 0x00,
<del> 0xac, 0x20]
<add> 0x15, 0x26]
<ide> };
<ide>
<ide> for (const test in tests) { | 1 |
Javascript | Javascript | add glsl comments to explain the math a bit | 40f015ffaf94421e1ac6378de9eed56bd5027161 | <ide><path>src/renderers/shaders/ShaderChunk/morphnormal_vertex.glsl.js
<ide> export default /* glsl */`
<ide> #ifdef USE_MORPHNORMALS
<ide>
<add> // morphTargetBaseInfluence is set based on BufferGeometry.morphTargetsRelative value:
<add> // When morphTargetsRelative is false, this is set to 1 - sum(influences); this results in normal = sum((target - base) * influence)
<add> // When morphTargetsRelative is true, this is set to 1; as a result, all morph targets are simply added to the base after weighting
<ide> objectNormal *= morphTargetBaseInfluence;
<ide> objectNormal += morphNormal0 * morphTargetInfluences[ 0 ];
<ide> objectNormal += morphNormal1 * morphTargetInfluences[ 1 ];
<ide><path>src/renderers/shaders/ShaderChunk/morphtarget_vertex.glsl.js
<ide> export default /* glsl */`
<ide> #ifdef USE_MORPHTARGETS
<ide>
<add> // morphTargetBaseInfluence is set based on BufferGeometry.morphTargetsRelative value:
<add> // When morphTargetsRelative is false, this is set to 1 - sum(influences); this results in position = sum((target - base) * influence)
<add> // When morphTargetsRelative is true, this is set to 1; as a result, all morph targets are simply added to the base after weighting
<ide> transformed *= morphTargetBaseInfluence;
<ide> transformed += morphTarget0 * morphTargetInfluences[ 0 ];
<ide> transformed += morphTarget1 * morphTargetInfluences[ 1 ]; | 2 |
Go | Go | add compareconfig test | 48a892bee5270feea3d0e7b64963acb38dbd634b | <ide><path>utils_test.go
<ide> func runContainer(r *Runtime, args []string, t *testing.T) (output string, err e
<ide> return
<ide> }
<ide>
<add>func TestCompareConfig(t *testing.T) {
<add> config1 := Config{
<add> Dns: []string{"1.1.1.1", "2.2.2.2"},
<add> PortSpecs: []string{"1111:1111", "2222:2222"},
<add> Env: []string{"VAR1=1", "VAR2=2"},
<add> }
<add> config2 := Config{
<add> Dns: []string{"0.0.0.0", "2.2.2.2"},
<add> PortSpecs: []string{"1111:1111", "2222:2222"},
<add> Env: []string{"VAR1=1", "VAR2=2"},
<add> }
<add> config3 := Config{
<add> Dns: []string{"1.1.1.1", "2.2.2.2"},
<add> PortSpecs: []string{"0000:0000", "2222:2222"},
<add> Env: []string{"VAR1=1", "VAR2=2"},
<add> }
<add> if CompareConfig(&config1, &config2) {
<add> t.Fatalf("CompareConfig should return false, Dns are different")
<add> }
<add> if CompareConfig(&config1, &config3) {
<add> t.Fatalf("CompareConfig should return false, PortSpecs are different")
<add> }
<add>}
<add>
<ide> func TestMergeConfig(t *testing.T) {
<ide> volumesImage := make(map[string]struct{})
<ide> volumesImage["/test1"] = struct{}{} | 1 |
Javascript | Javascript | fix lint error | 8c7808991c6bb7bbe96cc2bd1ac869515e461f8f | <ide><path>lib/optimize/ConcatenatedModule.js
<ide> class ConcatenatedModule extends Module {
<ide> if(moduleToInfoMap.get(this.rootModule).needCompatibilityFlag) {
<ide> result.add(`Object.defineProperty(${this.rootModule.exportsArgument || "exports"}, "__esModule", { value: true });\n`);
<ide> }
<del> let generated;
<del> do {
<del> generated = false;
<del> modulesWithInfo.forEach(info => {
<del> if(info.needNamespaceObject && !info.namespaceObjectSource) {
<del> const name = info.exportMap.get(true);
<del> const nsObj = [`var ${name} = {};`];
<del> for(const exportName of info.module.providedExports) {
<del> const finalName = getFinalName(info, exportName, moduleToInfoMap, requestShortener);
<del> nsObj.push(`__webpack_require__.d(${name}, ${JSON.stringify(exportName)}, function() { return ${finalName}; });`);
<del> }
<del> info.namespaceObjectSource = nsObj.join("\n") + "\n";
<del> generated = true;
<add> let generated = true;
<add> const ensureNsObjSource = info => {
<add> if(info.needNamespaceObject && !info.namespaceObjectSource) {
<add> const name = info.exportMap.get(true);
<add> const nsObj = [`var ${name} = {};`];
<add> for(const exportName of info.module.providedExports) {
<add> const finalName = getFinalName(info, exportName, moduleToInfoMap, requestShortener);
<add> nsObj.push(`__webpack_require__.d(${name}, ${JSON.stringify(exportName)}, function() { return ${finalName}; });`);
<ide> }
<del> });
<del> } while(generated);
<add> info.namespaceObjectSource = nsObj.join("\n") + "\n";
<add> generated = true;
<add> }
<add> };
<add> while(generated) {
<add> generated = false;
<add> modulesWithInfo.forEach(ensureNsObjSource);
<add> }
<ide> modulesWithInfo.forEach(info => {
<ide> result.add(`\n// CONCATENATED MODULE: ${info.module.readableIdentifier(requestShortener)}\n`);
<ide> if(info.namespaceObjectSource) { | 1 |
Javascript | Javascript | set readable lwm to 0 by default | 62dd04027b67f02856713e080651bb24b38206f8 | <ide><path>lib/_stream_readable.js
<ide> function ReadableState(options, stream) {
<ide> // the minimum number of bytes to buffer before emitting 'readable'
<ide> // default to pushing everything out as fast as possible.
<ide> this.lowWaterMark = options.hasOwnProperty('lowWaterMark') ?
<del> options.lowWaterMark : 1024;
<add> options.lowWaterMark : 0;
<ide>
<ide> // cast to ints.
<ide> assert(typeof this.bufferSize === 'number'); | 1 |
PHP | PHP | use the router for absolute urls | 1eaaf6c59e8d614ae1991f2315cbccffa817e92c | <ide><path>src/Illuminate/Auth/Notifications/ResetPassword.php
<ide> public function toMail($notifiable)
<ide> return (new MailMessage)
<ide> ->subject(Lang::get('Reset Password Notification'))
<ide> ->line(Lang::get('You are receiving this email because we received a password reset request for your account.'))
<del> ->action(Lang::get('Reset Password'), url(config('app.url').route('password.reset', ['token' => $this->token, 'email' => $notifiable->getEmailForPasswordReset()], false)))
<add> ->action(Lang::get('Reset Password'), url(route('password.reset', ['token' => $this->token, 'email' => $notifiable->getEmailForPasswordReset()], false)))
<ide> ->line(Lang::get('This password reset link will expire in :count minutes.', ['count' => config('auth.passwords.'.config('auth.defaults.passwords').'.expire')]))
<ide> ->line(Lang::get('If you did not request a password reset, no further action is required.'));
<ide> }
<ide><path>src/Illuminate/Foundation/Testing/Concerns/MakesHttpRequests.php
<ide> protected function prepareUrlForRequest($uri)
<ide> $uri = substr($uri, 1);
<ide> }
<ide>
<del> if (! Str::startsWith($uri, 'http')) {
<del> $uri = config('app.url').'/'.$uri;
<del> }
<del>
<del> return trim($uri, '/');
<add> return trim(url($uri), '/');
<ide> }
<ide>
<ide> /** | 2 |
Javascript | Javascript | add test case for holey array | 226e8ce8fd9fee33e8d8b37dd4d7d9c24483b4f9 | <ide><path>test/Validation.test.js
<ide> describe("Validation", () => {
<ide> - configuration.mode should be one of these:
<ide> \\"development\\" | \\"production\\" | \\"none\\"
<ide> -> Enable production optimizations or development hints."
<add>`)
<add> );
<add>
<add> createTestCase(
<add> "holey array",
<add> // eslint-disable-next-line no-sparse-arrays
<add> [
<add> {
<add> mode: "production"
<add> },
<add> ,
<add> {
<add> mode: "development"
<add> }
<add> ],
<add> msg =>
<add> expect(msg).toMatchInlineSnapshot(`
<add>"Invalid configuration object. Webpack has been initialised using a configuration object that does not match the API schema.
<add> - configuration should be an object."
<ide> `)
<ide> );
<ide> }); | 1 |
Ruby | Ruby | add more test coverage to layouts | ce5538be68d186d414c3e21fef72b8815fc58bd6 | <ide><path>actionview/test/actionpack/abstract/layouts_test.rb
<ide> def index
<ide> render :template => ActionView::Template::Text.new("Hello string!")
<ide> end
<ide>
<add> def action_has_layout_false
<add> render template: ActionView::Template::Text.new("Hello string!")
<add> end
<add>
<ide> def overwrite_default
<ide> render :template => ActionView::Template::Text.new("Hello string!"), :layout => :default
<ide> end
<ide> def index
<ide> end
<ide> end
<ide>
<del> class WithProcReturningNil < Base
<add> class WithProcReturningNil < WithString
<ide> layout proc { nil }
<ide>
<ide> def index
<ide> render template: ActionView::Template::Text.new("Hello nil!")
<ide> end
<ide> end
<ide>
<add> class WithProcReturningFalse < WithString
<add> layout proc { false }
<add>
<add> def index
<add> render template: ActionView::Template::Text.new("Hello false!")
<add> end
<add> end
<add>
<ide> class WithZeroArityProc < Base
<ide> layout proc { "overwrite" }
<ide>
<ide> def show
<ide> end
<ide> end
<ide>
<add> class WithOnlyConditionalFlipped < WithOnlyConditional
<add> layout "hello_override", only: :index
<add> end
<add>
<add> class WithOnlyConditionalFlippedAndInheriting < WithOnlyConditional
<add> layout nil, only: :index
<add> end
<add>
<ide> class WithExceptConditional < WithStringImpliedChild
<ide> layout "overwrite", :except => :show
<ide>
<ide> def show
<ide> end
<ide> end
<ide>
<add> class AbstractWithString < Base
<add> layout "hello"
<add> abstract!
<add> end
<add>
<add> class AbstractWithStringChild < AbstractWithString
<add> def index
<add> render template: ActionView::Template::Text.new("Hello abstract child!")
<add> end
<add> end
<add>
<add> class AbstractWithStringChildDefaultsToInherited < AbstractWithString
<add> layout nil
<add>
<add> def index
<add> render template: ActionView::Template::Text.new("Hello abstract child!")
<add> end
<add> end
<add>
<add> class WithConditionalOverride < WithString
<add> layout "overwrite", only: :overwritten
<add>
<add> def non_overwritten
<add> render template: ActionView::Template::Text.new("Hello non overwritten!")
<add> end
<add>
<add> def overwritten
<add> render template: ActionView::Template::Text.new("Hello overwritten!")
<add> end
<add> end
<add>
<add> class WithConditionalOverrideFlipped < WithConditionalOverride
<add> layout "hello_override", only: :non_overwritten
<add> end
<add>
<add> class WithConditionalOverrideFlippedAndInheriting < WithConditionalOverride
<add> layout nil, only: :non_overwritten
<add> end
<add>
<ide> class TestBase < ActiveSupport::TestCase
<ide> test "when no layout is specified, and no default is available, render without a layout" do
<ide> controller = Blank.new
<ide> class TestBase < ActiveSupport::TestCase
<ide> assert_equal "Overwrite Hello proc!", controller.response_body
<ide> end
<ide>
<del> test "when layout is specified as a proc and the proc returns nil, don't use a layout" do
<add> test "when layout is specified as a proc and the proc returns nil, use inherited layout" do
<ide> controller = WithProcReturningNil.new
<ide> controller.process(:index)
<del> assert_equal "Hello nil!", controller.response_body
<add> assert_equal "With String Hello nil!", controller.response_body
<add> end
<add>
<add> test "when layout is specified as a proc and the proc returns false, use no layout instead of inherited layout" do
<add> controller = WithProcReturningFalse.new
<add> controller.process(:index)
<add> assert_equal "Hello false!", controller.response_body
<ide> end
<ide>
<ide> test "when layout is specified as a proc without parameters it works just the same" do
<ide> class TestBase < ActiveSupport::TestCase
<ide> end
<ide>
<ide> test "when a grandchild has nil layout specified, the child has an implied layout, and the " \
<del> "parent has specified a layout, use the child controller layout" do
<add> "parent has specified a layout, use the grand child controller layout" do
<ide> controller = WithGrandChildOfImplied.new
<ide> controller.process(:index)
<ide> assert_equal "With Grand Child Hello string!", controller.response_body
<ide> end
<ide>
<add> test "a child inherits layout from abstract controller" do
<add> controller = AbstractWithStringChild.new
<add> controller.process(:index)
<add> assert_equal "With String Hello abstract child!", controller.response_body
<add> end
<add>
<add> test "a child inherits layout from abstract controller2" do
<add> controller = AbstractWithStringChildDefaultsToInherited.new
<add> controller.process(:index)
<add> assert_equal "With String Hello abstract child!", controller.response_body
<add> end
<add>
<ide> test "raises an exception when specifying layout true" do
<ide> assert_raises ArgumentError do
<ide> Object.class_eval do
<ide> class ::BadFailLayout < AbstractControllerTests::Layouts::Base
<ide> assert_equal "With Implied Hello index!", controller.response_body
<ide> end
<ide>
<add> test "when specify an :only option which match current action name and is opposite from parent controller" do
<add> controller = WithOnlyConditionalFlipped.new
<add> controller.process(:show)
<add> assert_equal "With Implied Hello show!", controller.response_body
<add> end
<add>
<add> test "when specify an :only option which does not match current action name and is opposite from parent controller" do
<add> controller = WithOnlyConditionalFlipped.new
<add> controller.process(:index)
<add> assert_equal "With Override Hello index!", controller.response_body
<add> end
<add>
<add> test "when specify to inherit and an :only option which match current action name and is opposite from parent controller" do
<add> controller = WithOnlyConditionalFlippedAndInheriting.new
<add> controller.process(:show)
<add> assert_equal "With Implied Hello show!", controller.response_body
<add> end
<add>
<add> test "when specify to inherit and an :only option which does not match current action name and is opposite from parent controller" do
<add> controller = WithOnlyConditionalFlippedAndInheriting.new
<add> controller.process(:index)
<add> assert_equal "Overwrite Hello index!", controller.response_body
<add> end
<add>
<ide> test "when specify an :except option which match current action name" do
<ide> controller = WithExceptConditional.new
<ide> controller.process(:show)
<ide> class ::BadFailLayout < AbstractControllerTests::Layouts::Base
<ide> assert_equal "Overwrite Hello index!", controller.response_body
<ide> end
<ide>
<add> test "when specify overwrite as an :only option which match current action name" do
<add> controller = WithConditionalOverride.new
<add> controller.process(:overwritten)
<add> assert_equal "Overwrite Hello overwritten!", controller.response_body
<add> end
<add>
<add> test "when specify overwrite as an :only option which does not match current action name" do
<add> controller = WithConditionalOverride.new
<add> controller.process(:non_overwritten)
<add> assert_equal "Hello non overwritten!", controller.response_body
<add> end
<add>
<add> test "when specify overwrite as an :only option which match current action name and is opposite from parent controller" do
<add> controller = WithConditionalOverrideFlipped.new
<add> controller.process(:overwritten)
<add> assert_equal "Hello overwritten!", controller.response_body
<add> end
<add>
<add> test "when specify overwrite as an :only option which does not match current action name and is opposite from parent controller" do
<add> controller = WithConditionalOverrideFlipped.new
<add> controller.process(:non_overwritten)
<add> assert_equal "With Override Hello non overwritten!", controller.response_body
<add> end
<add>
<add> test "when specify to inherit and overwrite as an :only option which match current action name and is opposite from parent controller" do
<add> controller = WithConditionalOverrideFlippedAndInheriting.new
<add> controller.process(:overwritten)
<add> assert_equal "Hello overwritten!", controller.response_body
<add> end
<add>
<add> test "when specify to inherit and overwrite as an :only option which does not match current action name and is opposite from parent controller" do
<add> controller = WithConditionalOverrideFlippedAndInheriting.new
<add> controller.process(:non_overwritten)
<add> assert_equal "Overwrite Hello non overwritten!", controller.response_body
<add> end
<add>
<ide> test "layout for anonymous controller" do
<ide> klass = Class.new(WithString) do
<ide> def index
<ide> def index
<ide> controller.process(:index)
<ide> assert_equal "With String index", controller.response_body
<ide> end
<add>
<add> test "when layout is disabled with #action_has_layout? returning false, render no layout" do
<add> controller = WithString.new
<add> controller.instance_eval do
<add> def action_has_layout?
<add> false
<add> end
<add> end
<add> controller.process(:action_has_layout_false)
<add> assert_equal "Hello string!", controller.response_body
<add> end
<ide> end
<ide> end
<ide> end | 1 |
Python | Python | reduce log verbosity in kubernetesexecutor. | b6c5189dadb9c09967ec53c8bca1832852c5500e | <ide><path>airflow/executors/kubernetes_executor.py
<ide>
<ide> import functools
<ide> import json
<add>import logging
<ide> import multiprocessing
<ide> import time
<ide> from datetime import timedelta
<ide> def _run(
<ide> )
<ide> for event in list_worker_pods():
<ide> task = event['object']
<del> self.log.info('Event: %s had an event of type %s', task.metadata.name, event['type'])
<add> self.log.debug('Event: %s had an event of type %s', task.metadata.name, event['type'])
<ide> if event['type'] == 'ERROR':
<ide> return self.process_error(event)
<ide> annotations = task.metadata.annotations
<ide> def process_status(
<ide> self.log.info('Event: Failed to start pod %s', pod_id)
<ide> self.watcher_queue.put((pod_id, namespace, State.FAILED, annotations, resource_version))
<ide> else:
<del> self.log.info('Event: %s Pending', pod_id)
<add> self.log.debug('Event: %s Pending', pod_id)
<ide> elif status == 'Failed':
<ide> self.log.error('Event: %s Failed', pod_id)
<ide> self.watcher_queue.put((pod_id, namespace, State.FAILED, annotations, resource_version))
<ide> def run_next(self, next_job: KubernetesJobType) -> None:
<ide> status
<ide> """
<ide> key, command, kube_executor_config, pod_template_file = next_job
<del> self.log.info('Kubernetes job is %s', key)
<ide>
<ide> dag_id, task_id, run_id, try_number, map_index = key
<ide>
<ide> def run_next(self, next_job: KubernetesJobType) -> None:
<ide> )
<ide> # Reconcile the pod generated by the Operator and the Pod
<ide> # generated by the .cfg file
<add> self.log.info('Creating kubernetes pod for job is %s, with pod name %s', key, pod.metadata.name)
<ide> self.log.debug("Kubernetes running for command %s", command)
<ide> self.log.debug("Kubernetes launching image %s", pod.spec.containers[0].image)
<ide>
<ide> def sync(self) -> None:
<ide> def process_watcher_task(self, task: KubernetesWatchType) -> None:
<ide> """Process the task by watcher."""
<ide> pod_id, namespace, state, annotations, resource_version = task
<del> self.log.info(
<add> self.log.debug(
<ide> 'Attempting to finish pod; pod_id: %s; state: %s; annotations: %s', pod_id, state, annotations
<ide> )
<ide> key = annotations_to_key(annotations=annotations)
<ide> def execute_async(
<ide> executor_config: Any | None = None,
<ide> ) -> None:
<ide> """Executes task asynchronously"""
<del> self.log.info('Add task %s with command %s with executor_config %s', key, command, executor_config)
<add> if self.log.isEnabledFor(logging.DEBUG):
<add> self.log.debug('Add task %s with command %s, executor_config %s', key, command, executor_config)
<add> else:
<add> self.log.info('Add task %s with command %s', key, command)
<add>
<ide> try:
<ide> kube_executor_config = PodGenerator.from_obj(executor_config)
<ide> except Exception:
<del> self.log.error("Invalid executor_config for %s", key)
<add> self.log.error("Invalid executor_config for %s. Executor_config: %s", key, executor_config)
<ide> self.fail(key=key, info="Invalid executor_config passed")
<ide> return
<ide> | 1 |
Python | Python | fix overflowerror on win-amd64 | 58d41159f4c070fd9e43048d15ce176f2a619871 | <ide><path>numpy/random/tests/test_regression.py
<ide> def test_hypergeometric_range(self):
<ide> (2**20 - 2, 2**20 - 2, 2**20 - 2), # Check for 32-bit systems
<ide> ]
<ide> is_64bits = sys.maxsize > 2**32
<del> if is_64bits:
<add> if is_64bits and sys.platform != 'win32':
<ide> args.append((2**40 - 2, 2**40 - 2, 2**40 - 2)) # Check for 64-bit systems
<ide> for arg in args:
<ide> assert_(np.random.hypergeometric(*arg) > 0) | 1 |
Javascript | Javascript | add missing method invocation | 178407da40b60267a672ff4f00cc1fe07924a3ea | <ide><path>local-cli/bundle/output/meta.js
<ide> */
<ide> 'use strict';
<ide>
<add>/* global Buffer: true */
<add>
<ide> const crypto = require('crypto');
<ide>
<ide> const isUTF8 = encoding => /^utf-?8$/i.test(encoding);
<ide>
<ide> const constantFor = encoding =>
<ide> /^ascii$/i.test(encoding) ? 1 :
<ide> isUTF8(encoding) ? 2 :
<del> /^(?:utf-?16(?:le)?|ucs-?2)$/ ? 3 : 0;
<add> /^(?:utf-?16(?:le)?|ucs-?2)$/.test(encoding) ? 3 : 0;
<ide>
<ide> module.exports = function(code, encoding) {
<ide> const hash = crypto.createHash('sha1'); | 1 |
Python | Python | set version to v2.2.0 | d4b63bb6ddb6e2de344388fe1327e849e30612c5 | <ide><path>spacy/about.py
<ide> # fmt: off
<ide> __title__ = "spacy"
<del>__version__ = "2.2.0.dev18"
<add>__version__ = "2.2.0"
<ide> __release__ = True
<ide> __download_url__ = "https://github.com/explosion/spacy-models/releases/download"
<ide> __compatibility__ = "https://raw.githubusercontent.com/explosion/spacy-models/master/compatibility.json" | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.