content_type stringclasses 8 values | main_lang stringclasses 7 values | message stringlengths 1 50 | sha stringlengths 40 40 | patch stringlengths 52 962k | file_count int64 1 300 |
|---|---|---|---|---|---|
Python | Python | add hungarian examples (see ) | 485c4f6df5763a01b90117632114f96e28c31738 | <ide><path>spacy/lang/hu/examples.py
<add># coding: utf8
<add>from __future__ import unicode_literals
<add>
<add>
<add>"""
<add>Example sentences to test spaCy and its language models.
<add>
<add>>>> from spacy.lang.hu.examples import sentences
<add>>>> docs = nlp.pipe(sentences)
<add>"""
<add>
<add>
<add>sentences = [
<add> "Az Apple egy brit startup vásárlását tervezi 1 milliárd dollár értékben.",
<add> "San Francisco vezetése mérlegeli a járdát használó szállító robotok betiltását.",
<add> "London az Egyesült Királyság egy nagy városa."
<add>] | 1 |
Javascript | Javascript | add comment and ignore rule on regular expression | ebf137954cbc0ea25dd10817f820de7840a92be7 | <ide><path>packages/line-ending-selector/lib/main.js
<ide> import StatusBarItem from './status-bar-item'
<ide> import helpers from './helpers'
<ide>
<ide> const LineEndingRegExp = /\r\n|\n/g
<add>
<add>// the following regular expression is executed natively via the `substring` package,
<add>// where `\A` corresponds to the beginning of the string.
<add>// More info: https://github.com/atom/line-ending-selector/pull/56
<add>// eslint-disable-next-line no-useless-escape
<ide> const LFRegExp = /(\A|[^\r])\n/g
<ide> const CRLFRegExp = /\r\n/g
<ide> | 1 |
Text | Text | add example for running with v8-inspector | bdb801261a8323a5a684398b60e6b9ca470c72a8 | <ide><path>doc/api/debugger.md
<ide> e.g. `--inspect=9222` will accept DevTools connections on port 9222.
<ide> To break on the first line of the application code, provide the `--debug-brk`
<ide> flag in addition to `--inspect`.
<ide>
<add>```txt
<add>$ node --inspect index.js
<add>Debugger listening on port 9229.
<add>Warning: This is an experimental feature and could change at any time.
<add>To start debugging, open the following URL in Chrome:
<add> chrome-devtools://devtools/remote/serve_file/@60cd6e859b9f557d2312f5bf532f6aec5f284980/inspector.html?experiments=true&v8only=true&ws=localhost:9229/node
<add>```
<add>
<ide> [TCP-based protocol]: https://github.com/v8/v8/wiki/Debugging-Protocol | 1 |
Javascript | Javascript | add filereader to eslint globals | 35b6d2e52bf3cab069c272bfd46490a2cd59673f | <ide><path>packages/eslint-config-react-native-community/index.js
<ide> module.exports = {
<ide> EventTarget: false,
<ide> exports: false,
<ide> fetch: false,
<add> FileReader: false,
<ide> FormData: false,
<ide> global: false,
<ide> Headers: false, | 1 |
Python | Python | resolve line-too-long in mixed_precision | ebb5e0e3f5e0b02cad2b54144022084301588ac5 | <ide><path>keras/mixed_precision/autocast_variable.py
<ide> class AutoCastVariable(tf.Variable, tf.__internal__.types.Tensor):
<ide> """Variable that will cast itself to a different dtype in applicable contexts.
<ide>
<ide> This class wraps a floating-point `tf.Variable`. It emulates the variable
<del> interface and delegates to the wrapped variable, but it additionally will cast
<del> the wrapped variable under an `enable_auto_cast_variables(dtype)` context
<del> manager.
<add> interface and delegates to the wrapped variable, but it additionally will
<add> cast the wrapped variable under an `enable_auto_cast_variables(dtype)`
<add> context manager.
<ide>
<ide> For example:
<ide>
<ide> class AutoCastVariable(tf.Variable, tf.__internal__.types.Tensor):
<ide> tf.float16
<ide>
<ide> The purpose of this class is to allow Keras layers to create variables in
<del> float32, and automatically cast them to float16 or bfloat16 when the layer is
<del> called.
<add> float32, and automatically cast them to float16 or bfloat16 when the layer
<add> is called.
<ide> """
<ide>
<ide> def __init__(self, variable):
<ide> def __init__(self, variable):
<ide> "type: %s" % variable.dtype.name
<ide> )
<ide> self._variable = variable
<del> # 'delegate' means AutoCastVariable.op return self._variable.op, which will
<del> # raise an AttributeError in Eager (as intended). If set to any other value,
<del> # AutoCastVariable.op returns that value instead, which is used to set the
<del> # op attribute in AutoCastVariable.assign().
<add> # 'delegate' means AutoCastVariable.op return self._variable.op, which
<add> # will raise an AttributeError in Eager (as intended). If set to any
<add> # other value, AutoCastVariable.op returns that value instead, which is
<add> # used to set the op attribute in AutoCastVariable.assign().
<ide> self._op = "delegate"
<ide>
<ide> def _should_cast(self):
<ide> def __getattr__(self, name):
<ide> def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False):
<ide> """Converts this variable to a tensor."""
<ide> if as_ref:
<del> # This ValueError should not occur in practice since it is impossible to
<del> # pass as_ref=True using public APIs.
<add> # This ValueError should not occur in practice since it is
<add> # impossible to pass as_ref=True using public APIs.
<ide> raise ValueError(
<ide> "Cannot convert AutoCastVariable to a tensor if "
<ide> "as_ref=True is passed to convert_to_tensor"
<ide> def __repr__(self):
<ide> # * 'count_up_to': This method only applies to int variables, which cannot
<ide> # be wrapped with an AutoCastVariable.
<ide> # * 'ref': Instead we inherit the definition from Variable.
<del> # If we defined and delegated to Variable, the ref of an AutoCastVariable
<del> # would be the same as the ref of the underlying variable, which would be
<del> # strange as they are different Python objects.
<add> # If we defined and delegated to Variable, the ref of an
<add> # AutoCastVariable would be the same as the ref of the underlying
<add> # variable, which would be strange as they are different Python objects.
<ide>
<ide> def set_shape(self, shape):
<ide> return self._variable.set_shape(self, shape)
<ide> def _apply_assign_update(
<ide> self, update_fn, value, use_locking=None, name=None, read_value=True
<ide> ):
<ide> # TODO(b/146181571): This logic can be simplified once
<del> # DistributedVariable.assign returns a DistributedVariable. Currently for
<del> # MirroredStrategy, it returns a Mirrored value.
<add> # DistributedVariable.assign returns a DistributedVariable. Currently
<add> # for MirroredStrategy, it returns a Mirrored value.
<ide> if tf.compat.v1.executing_eagerly_outside_functions():
<ide> assign_op = update_fn(value, use_locking, name, False)
<ide> if read_value:
<del> # We create a new AutoCastVariable with the same underlying tf.Variable.
<del> # The new AutoCastVariable is identical except the 'op' attribute is
<del> # defined. This matches the behavior of tf.Variable.assign.
<add> # We create a new AutoCastVariable with the same underlying
<add> # tf.Variable. The new AutoCastVariable is identical except the
<add> # 'op' attribute is defined. This matches the behavior of
<add> # tf.Variable.assign.
<ide> var = create_autocast_variable(self._variable)
<ide> var._op = assign_op # pylint:disable=protected-access
<ide> return var
<ide> def get_shape(self):
<ide> def _gather_saveables_for_checkpoint(self):
<ide> # By delegating this method to the wrapped variable, checkpoints with
<ide> # AutoCastVariables are identical to checkpoints with normal variables.
<del> # Therefore models checkpointed with AutoCastVariables can be restored on
<del> # models with normal variables, and vice versa.
<add> # Therefore models checkpointed with AutoCastVariables can be restored
<add> # on models with normal variables, and vice versa.
<ide> return (
<ide> self._variable._gather_saveables_for_checkpoint()
<ide> ) # pylint:disable=protected-access
<ide> def __div__(self, o):
<ide> try:
<ide> return self.read_value().__div__(o)
<ide> except AttributeError:
<del> # See https://docs.python.org/3/library/constants.html#NotImplemented
<add> # See
<add> # https://docs.python.org/3/library/constants.html#NotImplemented
<ide> return NotImplemented
<ide>
<ide> def __rdiv__(self, o):
<ide> try:
<ide> return self.read_value().__rdiv__(o)
<ide> except AttributeError:
<del> # See https://docs.python.org/3/library/constants.html#NotImplemented
<add> # See
<add> # https://docs.python.org/3/library/constants.html#NotImplemented
<ide> return NotImplemented
<ide>
<ide> def __matmul__(self, o):
<ide> try:
<ide> return self.read_value().__matmul__(o)
<ide> except AttributeError:
<del> # See https://docs.python.org/3/library/constants.html#NotImplemented
<add> # See
<add> # https://docs.python.org/3/library/constants.html#NotImplemented
<ide> return NotImplemented
<ide>
<ide> def __rmatmul__(self, o):
<ide> try:
<ide> return self.read_value().__rmatmul__(o)
<ide> except AttributeError:
<del> # See https://docs.python.org/3/library/constants.html#NotImplemented
<add> # See
<add> # https://docs.python.org/3/library/constants.html#NotImplemented
<ide> return NotImplemented
<ide>
<ide> # pylint: enable=multiple-statements
<ide> def __rmatmul__(self, o):
<ide> def create_autocast_variable(variable):
<ide> """Creates an AutoCastVariable that wraps another variable.
<ide>
<del> This typically just returns `AutoCastVariable(variable)`. But, if the variable
<del> is a DistributedVariable or one of its subclasses, we instead dynamically
<del> create a class that subclasses from both AutoCastVariable and
<add> This typically just returns `AutoCastVariable(variable)`. But, if the
<add> variable is a DistributedVariable or one of its subclasses, we instead
<add> dynamically create a class that subclasses from both AutoCastVariable and
<ide> variable.__class__. This is so the returned variable will still pass
<ide> `isinstance(variable, variable.__class__)`, which is required for
<ide> DistributedVariables and its subclasses to work properly.
<ide><path>keras/mixed_precision/autocast_variable_test.py
<ide> def set_cpu_logical_devices_to_at_least(num):
<ide> raise RuntimeError("No CPU found")
<ide> if len(physical_devices) >= num:
<ide> return
<del> # By default each physical device corresponds to one logical device. We create
<del> # multiple logical devices for the last physical device so that we have `num`
<del> # logical devices.
<add> # By default each physical device corresponds to one logical device. We
<add> # create multiple logical devices for the last physical device so that we
<add> # have `num` logical devices.
<ide> num = num - len(physical_devices) + 1
<ide> logical_devices = []
<ide> for _ in range(num):
<ide> def test_read(self, distribution):
<ide>
<ide> def test_sparse_reads(self):
<ide> x = get_var([1.0, 2], tf.float32)
<del> # DistributedVariables do not support sparse_read or gather_nd, so we pass
<del> # distribute=False
<add> # DistributedVariables do not support sparse_read or gather_nd, so we
<add> # pass distribute=False
<ide> x = autocast_variable.create_autocast_variable(x)
<ide> self.evaluate(x.initializer)
<ide>
<ide> def test_method_delegations(self, distribution):
<ide> with self.test_session(), distribution.scope():
<ide> for read_dtype in (tf.float32, tf.float16):
<ide> if tf.distribute.has_strategy() and not tf.executing_eagerly():
<del> # MirroredVariable.assign will (incorrectly) return a Mirrored value
<del> # instead of a MirroredVariable in graph mode.
<del> # So we cannot properly wrap it in an AutoCastVariable.
<add> # MirroredVariable.assign will (incorrectly) return a
<add> # Mirrored value instead of a MirroredVariable in graph
<add> # mode. So we cannot properly wrap it in an
<add> # AutoCastVariable.
<ide> evaluate = self.evaluate
<ide> else:
<ide>
<ide> def evaluate(var):
<ide> self.assertEqual(self.evaluate(x.initialized_value()), 7)
<ide> if not tf.executing_eagerly():
<ide> if not tf.distribute.has_strategy():
<del> # These functions are not supported for DistributedVariables
<add> # These functions are not supported for
<add> # DistributedVariables
<ide> x.load(9)
<ide> self.assertEqual(x.eval(), 9)
<ide> self.assertEqual(self.evaluate(x.initial_value), 7)
<ide> self.assertEqual(x.op, x._variable.op)
<ide> self.assertEqual(x.graph, x._variable.graph)
<ide> if not tf.distribute.has_strategy():
<del> # These attributes are not supported for DistributedVariables
<add> # These attributes are not supported for
<add> # DistributedVariables
<ide> self.assertIsNone(x.constraint)
<ide> self.assertEqual(x.initializer, x._variable.initializer)
<ide> self.assertEqual(evaluate(x.assign(8)), 8)
<ide> def run_and_check():
<ide> # Attempt to assign float16 values
<ide> with self.assertRaisesRegex(
<ide> ValueError,
<del> "conversion requested dtype float32 for Tensor with dtype float16",
<add> "conversion requested dtype float32 for Tensor with dtype "
<add> "float16",
<ide> ):
<ide> self.evaluate(x.assign(v2))
<ide> with self.assertRaisesRegex(
<ide> ValueError,
<del> "conversion requested dtype float32 for Tensor with dtype float16",
<add> "conversion requested dtype float32 for Tensor with dtype "
<add> "float16",
<ide> ):
<ide> self.evaluate(x.assign_add(v2))
<ide> with self.assertRaisesRegex(
<ide> ValueError,
<del> "conversion requested dtype float32 for Tensor with dtype float16",
<add> "conversion requested dtype float32 for Tensor with dtype "
<add> "float16",
<ide> ):
<ide> self.evaluate(x.assign_sub(v2))
<ide>
<ide> def run_and_check():
<ide> self.assertAllClose(3.0, self.evaluate(x.assign_sub(3.0)))
<ide>
<ide> # Assign multiple times
<del> # This currently doesn't work in graph mode if a strategy is used
<add> # This currently doesn't work in graph mode if a strategy is
<add> # used
<ide> if not tf.distribute.has_strategy() or tf.executing_eagerly():
<ide> assign = x.assign(1.0)
<ide> self.assertAllClose(1.0, self.evaluate(assign))
<ide> def test_op_attribute(self, distribution):
<ide> x = get_var(0.0, tf.float32)
<ide> x = autocast_variable.create_autocast_variable(x)
<ide>
<del> # Variable.op raises an AttributeError in Eager mode and is an op in graph
<del> # mode. Variable.assign(...).op is None in Eager mode and an op in Graph
<del> # mode or a tf.function. We test this is also true of AutoCastVariable.
<add> # Variable.op raises an AttributeError in Eager mode and is an op in
<add> # graph mode. Variable.assign(...).op is None in Eager mode and an
<add> # op in Graph mode or a tf.function. We test this is also true of
<add> # AutoCastVariable.
<ide> if tf.executing_eagerly():
<ide> with self.assertRaises(AttributeError):
<ide> x.op # pylint: disable=pointless-statement
<ide> def test_assign_stays_in_true_dtype(self, distribution):
<ide> x = get_var(1.0, tf.float32)
<ide> x = autocast_variable.create_autocast_variable(x)
<ide> self.evaluate(x.initializer)
<del> # small_val is a value such that 1.0 + small_val == 1.0 in fp16, but not
<del> # in fp32
<add> # small_val is a value such that 1.0 + small_val == 1.0 in fp16, but
<add> # not in fp32
<ide> small_val = np.finfo("float16").eps / 2
<ide> small_tensor = tf.constant(small_val, dtype=tf.float32)
<ide> with autocast_variable.enable_auto_cast_variables(tf.float16):
<del> # Variable should be increased, despite it appearing to be the same
<del> # float16 value.
<add> # Variable should be increased, despite it appearing to be the
<add> # same float16 value.
<ide> self.evaluate(x.assign(1.0 + small_tensor))
<ide> self.assertEqual(1.0, self.evaluate(x.value()))
<ide> self.assertEqual(1.0 + small_val, self.evaluate(x))
<ide> def test_thread_local_autocast_dtype(self):
<ide> with autocast_variable.enable_auto_cast_variables(tf.float16):
<ide> self.assertEqual(tf.identity(x).dtype, tf.float16)
<ide>
<del> # New threads should not see the modified value of the autocast dtype.
<add> # New threads should not see the modified value of the autocast
<add> # dtype.
<ide> var_dtype = None
<ide>
<ide> def f():
<ide> def test_invalid_wrapped_variable(self, distribution):
<ide> autocast_variable.create_autocast_variable(x)
<ide>
<ide> def test_repr(self):
<del> # We do not test with DistributionStrategy because we do not want to rely on
<del> # the exact __repr__ output of a DistributedVariable.
<add> # We do not test with DistributionStrategy because we do not want to
<add> # rely on the exact __repr__ output of a DistributedVariable.
<ide> x = get_var(1.0, tf.float32, name="x")
<ide> x = autocast_variable.create_autocast_variable(x)
<ide> if tf.executing_eagerly():
<ide> def test_optimizer(self, optimizer_class, use_tf_function):
<ide> opt = optimizer_class(learning_rate=1.0)
<ide>
<ide> def f():
<del> # Minimize both the AutoCastVariable and the normal tf.Variable. Both
<del> # variables should be updated to the same value.
<add> # Minimize both the AutoCastVariable and the normal tf.Variable.
<add> # Both variables should be updated to the same value.
<ide> op = opt.minimize(lambda: x + y, var_list=[x, y])
<ide> return (
<ide> None
<ide> def f():
<ide> self.evaluate(op)
<ide> # Assert the AutoCastVariable has changed from its initial value
<ide> self.assertNotEqual(self.evaluate(x), 1.0)
<del> # Assert AutoCastVariable is updated correctly by comparing it to the normal
<del> # variable
<add> # Assert AutoCastVariable is updated correctly by comparing it to the
<add> # normal variable
<ide> self.assertAlmostEqual(self.evaluate(x), self.evaluate(y))
<ide> if optimizer_class in (
<ide> gradient_descent_v2.SGD,
<ide><path>keras/mixed_precision/device_compatibility_check.py
<ide> def _log_device_compatibility_check(policy_name, gpu_details_list):
<ide> `tf.config.experimental.get_device_details()`.
<ide> """
<ide> if policy_name != "mixed_float16":
<del> # TODO(b/145686977): Log if the policy is 'mixed_bfloat16'. This requires
<del> # checking if a TPU is available.
<add> # TODO(b/145686977): Log if the policy is 'mixed_bfloat16'. This
<add> # requires checking if a TPU is available.
<ide> return
<ide> supported_device_strs = []
<ide> unsupported_device_strs = []
<ide><path>keras/mixed_precision/device_compatibility_check_test.py
<ide> def test_supported(self):
<ide> details_list = [device_details("GPU 1", (7, 1))]
<ide> regex = re.compile(
<ide> r".*compatibility check \(mixed_float16\): OK\n"
<del> r"Your GPU will likely run quickly with dtype policy mixed_float16 as "
<del> r"it has compute capability of at least 7.0. Your GPU: GPU 1, compute "
<del> r"capability 7.1",
<add> r"Your GPU will likely run quickly with dtype policy mixed_float16 "
<add> r"as it has compute capability of at least 7.0. Your GPU: GPU 1, "
<add> r"compute capability 7.1",
<ide> flags=re.MULTILINE,
<ide> )
<ide> self._test_compat_check(details_list, False, regex)
<ide> def test_supported(self):
<ide> ]
<ide> regex = re.compile(
<ide> r".*compatibility check \(mixed_float16\): OK\n"
<del> r"Your GPUs will likely run quickly with dtype policy mixed_float16 as "
<del> r"they all have compute capability of at least 7.0",
<add> r"Your GPUs will likely run quickly with dtype policy "
<add> r"mixed_float16 as they all have compute capability of "
<add> r"at least 7.0",
<ide> flags=re.MULTILINE,
<ide> )
<ide> self._test_compat_check(details_list, False, regex)
<ide> def test_unsupported(self):
<ide> regex = re.compile(
<ide> r".*compatibility check \(mixed_float16\): WARNING\n"
<ide> r"Your GPU may run slowly with dtype policy mixed_float16.*\n"
<del> r" Unknown GPU, no compute capability \(probably not an Nvidia GPU\)\n"
<del> r"See.*",
<add> r" Unknown GPU, no compute capability "
<add> r"\(probably not an Nvidia GPU\)\nSee.*",
<ide> flags=re.MULTILINE,
<ide> )
<ide> self._test_compat_check(details_list, True, regex)
<ide> def test_unsupported(self):
<ide> details_list = []
<ide> regex = re.compile(
<ide> r".*compatibility check \(mixed_float16\): WARNING\n"
<del> r"The dtype policy mixed_float16 may run slowly because this machine "
<del> r"does not have a GPU",
<add> r"The dtype policy mixed_float16 may run slowly because this "
<add> r"machine does not have a GPU",
<ide> flags=re.MULTILINE,
<ide> )
<ide> self._test_compat_check(details_list, True, regex)
<ide> def test_mix_of_supported_and_unsupported(self):
<ide> ]
<ide> regex = re.compile(
<ide> r".*compatibility check \(mixed_float16\): WARNING\n"
<del> r"Some of your GPUs may run slowly with dtype policy mixed_float16.*\n"
<del> r" GPU 1, compute capability 7.0 \(x2\)\n"
<add> r"Some of your GPUs may run slowly with dtype policy "
<add> r"mixed_float16.*\n GPU 1, compute capability 7.0 \(x2\)\n"
<ide> r" GPU 2, compute capability 6.0\n"
<ide> r"See.*",
<ide> flags=re.MULTILINE,
<ide><path>keras/mixed_precision/layer_correctness_test.py
<ide> def test_layer(
<ide> ):
<ide> """Tests a layer by comparing the float32 and mixed precision weights.
<ide>
<del> A float32 layer, a mixed precision layer, and a distributed mixed precision
<del> layer are run. The three layers are identical other than their dtypes and
<del> distribution strategies. The outputs after predict() and weights after fit()
<del> are asserted to be close.
<add> A float32 layer, a mixed precision layer, and a distributed mixed
<add> precision layer are run. The three layers are identical other than their
<add> dtypes and distribution strategies. The outputs after predict() and
<add> weights after fit() are asserted to be close.
<ide>
<ide> Args:
<del> f32_layer_fn: A function returning a float32 layer. The other two layers
<del> will automatically be created from this
<add> f32_layer_fn: A function returning a float32 layer. The other two
<add> layers will automatically be created from this.
<ide> input_shape: The shape of the input to the layer, including the batch
<ide> dimension. Or a list of shapes if the layer takes multiple inputs.
<ide> rtol: The relative tolerance to be asserted.
<ide> atol: The absolute tolerance to be asserted.
<del> input_data: A Numpy array with the data of the input. If None, input data
<del> will be randomly generated
<add> input_data: A Numpy array with the data of the input. If None, input
<add> data will be randomly generated.
<ide> """
<ide>
<ide> if (
<ide> def test_layer(
<ide> # Compute per_replica_input_shapes for the distributed model
<ide> global_batch_size = input_shapes[0][0]
<ide> assert global_batch_size % strategy.num_replicas_in_sync == 0, (
<del> "The number of replicas, %d, does not divide the global batch size of "
<del> "%d" % (strategy.num_replicas_in_sync, global_batch_size)
<add> "The number of replicas, %d, does not divide the global batch "
<add> "size of %d" % (strategy.num_replicas_in_sync, global_batch_size)
<ide> )
<ide> per_replica_batch_size = (
<ide> global_batch_size // strategy.num_replicas_in_sync
<ide> def test_layer(
<ide>
<ide> # Generate input data
<ide> if input_data is None:
<del> # Cast inputs to float16 to avoid measuring error from having f16 layers
<del> # cast to float16.
<add> # Cast inputs to float16 to avoid measuring error from having f16
<add> # layers cast to float16.
<ide> input_data = [
<ide> np.random.normal(size=s).astype("float16") for s in input_shapes
<ide> ]
<ide><path>keras/mixed_precision/layer_test.py
<ide> def build(self, _):
<ide> self.v = self.add_weight("v", dtype="int32", trainable=False)
<ide>
<ide> def call(self, inputs):
<del> # Only float variables should be autocasted. This will fail if self.v is
<del> # autocasted to float32
<add> # Only float variables should be autocasted. This will fail if
<add> # self.v is autocasted to float32
<ide> return tf.cast(inputs, "int32") + self.v
<ide>
<ide> x = tf.constant([1.0])
<ide> def test_gradient(self, strategy_fn):
<ide> with strategy_fn().scope() as strategy:
<ide> with policy.policy_scope("mixed_float16"):
<ide> layer = mp_test_util.MultiplyLayer(assert_type=tf.float16)
<del> # Learning rate is small enough that if applied to a float16 variable,
<del> # the variable will not change. So this tests the learning rate is not
<del> # applied to a float16 value, but instead the float32 variable.
<add> # Learning rate is small enough that if applied to a float16
<add> # variable, the variable will not change. So this tests the
<add> # learning rate is not applied to a float16 value, but instead
<add> # the float32 variable.
<ide> opt = gradient_descent.SGD(2**-14)
<ide>
<ide> def run_fn():
<ide> with tf.GradientTape() as tape:
<ide> y = layer(x)
<del> # Divide by num_replicas_in_sync, as the effective total loss is the
<del> # sum of each of the replica's losses.
<add> # Divide by num_replicas_in_sync, as the effective total
<add> # loss is the sum of each of the replica's losses.
<ide> y /= strategy.num_replicas_in_sync
<ide>
<ide> grad = tape.gradient(y, layer.v)
<ide> def run_fn():
<ide> self.evaluate(tf.compat.v1.global_variables_initializer())
<ide> self.evaluate(op)
<ide> # The gradient with respective to the variable is 1. Since the
<del> # variable is initialized with 1 and the learning rate is 2**-14, the
<del> # new variable value should be: init_val - gradient * learning_rate,
<del> # which is 1 - 1 * 2**-14
<add> # variable is initialized with 1 and the learning rate is
<add> # 2**-14, the new variable value should be: init_val - gradient
<add> # * learning_rate, which is 1 - 1 * 2**-14
<ide> self.assertEqual(self.evaluate(layer.v), 1 - 2**-14)
<ide>
<ide> def _test_checkpointing_layer_weights(
<ide> self, strategy_fn, mixed_prec_when_saving, mixed_prec_when_loading
<ide> ):
<del> # In this test, we potentially save with mixed precision enabled and load
<del> # with mixed precision disabled, or vice versa. This is possible because
<del> # variables are float32 regardless of whether mixed precision is enabled.
<add> # In this test, we potentially save with mixed precision enabled and
<add> # load with mixed precision disabled, or vice versa. This is possible
<add> # because variables are float32 regardless of whether mixed precision is
<add> # enabled.
<ide> save_policy = "mixed_float16" if mixed_prec_when_saving else "float32"
<ide> load_policy = "mixed_float16" if mixed_prec_when_loading else "float32"
<ide> save_input_dtype = "float16" if mixed_prec_when_saving else "float32"
<ide> def test_config(self, strategy_fn):
<ide> config = layer.get_config()
<ide> self.assertIsNone(config["dtype"])
<ide> layer = mp_test_util.MultiplyLayer.from_config(config)
<del> # If a layer is serialized with the "_infer" policy, when deserialized
<del> # into TF 2 it will have the global policy instead of "_infer". This is
<del> # because "_infer" is serialized into None, and passing dtype=None in
<del> # TensorFlow 2 indicates to use the global policy.
<add> # If a layer is serialized with the "_infer" policy, when
<add> # deserialized into TF 2 it will have the global policy instead of
<add> # "_infer". This is because "_infer" is serialized into None, and
<add> # passing dtype=None in TensorFlow 2 indicates to use the global
<add> # policy.
<ide> self.assertEqual(layer.dtype, "float32")
<ide> self.assertEqual(layer(x).dtype, "float32")
<ide> self.assertEqual(layer.v.dtype, "float32")
<ide>
<ide> @parameterized.named_parameters(*TESTCASES)
<ide> def test_from_config_policy_v1(self, strategy_fn):
<ide> # Test that layers serialized in previous Keras versions with the
<del> # now-deleted PolicyV1 can be deserialized. In such cases, the PolicyV1 will
<del> # be converted to a Policy, since PolicyV1 no longer exists. Unlike Policy,
<del> # PolicyV1 had a "loss_scale" field, which is silently dropped when
<del> # deserialized.
<add> # now-deleted PolicyV1 can be deserialized. In such cases, the PolicyV1
<add> # will be converted to a Policy, since PolicyV1 no longer exists. Unlike
<add> # Policy, PolicyV1 had a "loss_scale" field, which is silently dropped
<add> # when deserialized.
<ide> x = tf.constant([1.0], dtype=tf.float16)
<ide> with strategy_fn().scope():
<ide>
<ide> def test_unsupported_strategy(self):
<ide> mp_test_util.MultiplyLayer(dtype=policy.Policy("float64"))
<ide>
<ide> def test_input_spec_dtype(self):
<del> # Test the InputSpec's dtype is compared against the inputs before the layer
<del> # casts them, not after.
<add> # Test the InputSpec's dtype is compared against the inputs before the
<add> # layer casts them, not after.
<ide> layer = mp_test_util.MultiplyLayer(dtype="float64")
<ide> layer.input_spec = input_spec.InputSpec(dtype="float16")
<ide>
<ide><path>keras/mixed_precision/loss_scale_optimizer.py
<ide> def __init__(self, value):
<ide>
<ide>
<ide> def _is_all_finite(grads):
<del> """Returns a scalar boolean tensor indicating if all gradients are finite."""
<add> """Returns a scalar boolean tensor indicating if all gradients are
<add> finite."""
<ide> is_finite_per_grad = [
<ide> tf.reduce_all(tf.math.is_finite(g)) for g in grads if g is not None
<ide> ]
<ide> def _maybe_warn_about_scaling(
<ide> "You forgot to call LossScaleOptimizer.get_scaled_loss() and "
<ide> "LossScaleOptimizer.get_unscaled_gradients() before calling "
<ide> "LossScaleOptimizer.apply_gradients(). This will likely result in "
<del> "worse model quality, so please call them in the correct places! For "
<del> f"example:{example_code}\nFor more information, see "
<add> "worse model quality, so please call them in the correct places! "
<add> f"For example:{example_code}\nFor more information, see "
<ide> "https://www.tensorflow.org/api_docs/python/tf/keras/mixed_precision/LossScaleOptimizer"
<ide> )
<ide> elif not loss_has_been_scaled:
<ide> tf_logging.warning(
<ide> "You forgot to call LossScaleOptimizer.get_scaled_loss() before "
<ide> "calling LossScaleOptimizer.apply_gradients() (you did call "
<del> "get_unscaled_gradients() however). This will likely result in worse "
<del> "model quality, so please call get_scaled_loss() in the correct place! "
<del> f"For example:{example_code}\nFor more information, see "
<add> "get_unscaled_gradients() however). This will likely result in "
<add> "worse model quality, so please call get_scaled_loss() in the "
<add> f"correct place! For example:{example_code}\nFor more information, "
<add> "see "
<ide> "https://www.tensorflow.org/api_docs/python/tf/keras/mixed_precision/LossScaleOptimizer"
<ide> )
<ide> elif not gradients_have_been_unscaled:
<ide> tf_logging.warning(
<ide> "You forgot to call LossScaleOptimizer.get_unscaled_gradients() "
<ide> "before calling LossScaleOptimizer.apply_gradients() (you did call "
<ide> "get_scaled_loss() however). This will likely result in worse "
<del> "model quality, so please call get_unscaled_gradients() in the correct "
<del> f"place! For example:{example_code}\nFor more information, see "
<add> "model quality, so please call get_unscaled_gradients() in the "
<add> f"correct place! For example:{example_code}\nFor more information, "
<add> "see "
<ide> "https://www.tensorflow.org/api_docs/python/tf/keras/mixed_precision/LossScaleOptimizer"
<ide> )
<ide>
<ide> def __init__(self, initial_loss_scale, growth_steps, multiplier):
<ide> initial_value=self._initial_loss_scale,
<ide> )
<ide> # The number of consecutive steps with finite gradients since the last
<del> # nonfinite gradient or change in loss scale. The name is 'good_steps' for
<del> # backwards compatibility with older checkpoints.
<add> # nonfinite gradient or change in loss scale. The name is 'good_steps'
<add> # for backwards compatibility with older checkpoints.
<ide> self._counter = self._add_weight(
<ide> name="good_steps", dtype=tf.int64, initial_value=0
<ide> )
<ide> def update(self, grads):
<ide> all-reduced gradient of the loss with respect to a weight.
<ide>
<ide> Returns:
<del> update_op: In eager mode, None. In graph mode, an op to update the loss
<del> scale.
<add> update_op: In eager mode, None. In graph mode, an op to update the
<add> loss scale.
<ide> should_apply_gradients: Either a bool or a scalar boolean tensor. If
<ide> False, the caller should skip applying `grads` to the variables this
<ide> step.
<ide> def update(self, grads):
<ide> _is_all_finite, args=(grads,)
<ide> )
<ide> # Each replica computed the same `is_finite` value, since `grads` is
<del> # all-reduced across replicas. Arbitrarily take `is_finite` from the first
<del> # replica.
<add> # all-reduced across replicas. Arbitrarily take `is_finite` from the
<add> # first replica.
<ide> is_finite = distribution.experimental_local_results(
<ide> is_finite_per_replica
<ide> )[0]
<ide> class BaseLossScaleOptimizer(metaclass=LossScaleOptimizerMetaclass):
<ide> gradients when float16 is used. To prevent underflow, the loss is multiplied
<ide> (or "scaled") by a certain factor called the "loss scale", which causes
<ide> intermediate gradients to be scaled by the loss scale as well. The final
<del> gradients are divided (or "unscaled") by the loss scale to bring them back to
<del> their original value.
<add> gradients are divided (or "unscaled") by the loss scale to bring them back
<add> to their original value.
<ide>
<ide> `LossScaleOptimizer` wraps another optimizer and applies loss scaling to it.
<del> By default, the loss scale is dynamically updated over time so you do not have
<del> to choose the loss scale. The `minimize` method automatically scales the loss,
<del> unscales the gradients, and updates the loss scale so all you have to do is
<del> wrap your optimizer with a `LossScaleOptimizer` if you use `minimize`. For
<del> example:
<add> By default, the loss scale is dynamically updated over time so you do not
<add> have to choose the loss scale. The `minimize` method automatically scales
<add> the loss, unscales the gradients, and updates the loss scale so all you have
<add> to do is wrap your optimizer with a `LossScaleOptimizer` if you use
<add> `minimize`. For example:
<ide>
<ide> >>> opt = tf.keras.optimizers.SGD(0.25)
<ide> >>> opt = tf.keras.mixed_precision.LossScaleOptimizer(opt)
<ide> class BaseLossScaleOptimizer(metaclass=LossScaleOptimizerMetaclass):
<ide> >>> var.numpy()
<ide> 0.5
<ide>
<del> If a `tf.GradientTape` is used to compute gradients instead of `minimize`, you
<del> must scale the loss and gradients manually. This can be done with the
<add> If a `tf.GradientTape` is used to compute gradients instead of `minimize`,
<add> you must scale the loss and gradients manually. This can be done with the
<ide> `LossScaleOptimizer.get_scaled_loss` and
<ide> `LossScaleOptimizer.get_unscaled_gradients` methods. For example:
<ide>
<ide> class BaseLossScaleOptimizer(metaclass=LossScaleOptimizerMetaclass):
<ide> 0.25
<ide>
<ide> Warning: If you forget to call `get_scaled_loss` or `get_unscaled_gradients`
<del> (or both) when using a `tf.GradientTape`, the model will likely converge to a
<del> worse quality. Please make sure you call each function exactly once.
<add> (or both) when using a `tf.GradientTape`, the model will likely converge to
<add> a worse quality. Please make sure you call each function exactly once.
<ide>
<ide> When mixed precision with float16 is used, there is typically no risk of
<ide> underflow affecting model quality if loss scaling is properly used. See
<ide> class BaseLossScaleOptimizer(metaclass=LossScaleOptimizerMetaclass):
<ide> inner_optimizer: The `tf.keras.optimizers.Optimizer` or
<ide> `tf.keras.optimizers.experimental.Optimizer` instance to wrap.
<ide> dynamic: Bool indicating whether dynamic loss scaling is used. Defaults to
<del> True. If True, the loss scale will be dynamically updated over time using
<del> an algorithm that keeps the loss scale at approximately its optimal value.
<del> If False, a single fixed loss scale is used and `initial_scale` must be
<del> specified, which is used as the loss scale. Recommended to keep as True,
<del> as choosing a fixed loss scale can be tricky. Currently, there is a small
<del> performance overhead to dynamic loss scaling compared to fixed loss
<del> scaling.
<add> True. If True, the loss scale will be dynamically updated over time
<add> using an algorithm that keeps the loss scale at approximately its
<add> optimal value. If False, a single fixed loss scale is used and
<add> `initial_scale` must be specified, which is used as the loss scale.
<add> Recommended to keep as True, as choosing a fixed loss scale can be
<add> tricky. Currently, there is a small performance overhead to dynamic loss
<add> scaling compared to fixed loss scaling.
<ide> initial_scale: The initial loss scale. If `dynamic` is True, this defaults
<ide> to `2 ** 15`. If `dynamic` is False, this must be specified and acts as
<ide> the sole loss scale, as the loss scale does not change over time. When
<del> dynamic loss scaling is used, is better for this to be a very high number,
<del> because a loss scale that is too high gets lowered far more quickly than a
<del> loss scale that is too low gets raised.
<add>       dynamic loss scaling is used, it is better for this to be a very
<add>       high number, because a loss scale that is too high gets lowered far
<add>       more quickly than a loss scale that is too low gets raised.
<ide> dynamic_growth_steps: With dynamic loss scaling, every
<ide> `dynamic_growth_steps` steps with finite gradients, the loss scale is
<ide> doubled. Defaults to 2000. If a nonfinite gradient is encountered, the
<del> count is reset back to zero, gradients are skipped that step, and the loss
<del> scale is halved. The count can be queried with
<del> `LossScaleOptimizer.dynamic_counter`. This argument can only be specified
<del> if `dynamic` is True.
<add> count is reset back to zero, gradients are skipped that step, and the
<add> loss scale is halved. The count can be queried with
<add> `LossScaleOptimizer.dynamic_counter`. This argument can only be
<add> specified if `dynamic` is True.
<ide>
<ide> `LossScaleOptimizer` will occasionally skip applying gradients to the
<ide> variables, in which case the trainable variables will not change that step.
<ide> This is done because the dynamic loss scale will sometimes be raised too
<del> high, causing overflow in the gradients. Typically, the first 2 to 15 steps of
<del> the model are skipped as the initial loss scale is very high, but afterwards
<del> steps will only be skipped on average 0.05% of the time (the fraction of steps
<del> skipped is `1 / dynamic_growth_steps`).
<add> high, causing overflow in the gradients. Typically, the first 2 to 15 steps
<add> of the model are skipped as the initial loss scale is very high, but
<add> afterwards steps will only be skipped on average 0.05% of the time (the
<add> fraction of steps skipped is `1 / dynamic_growth_steps`).
<ide>
<ide> `LossScaleOptimizer` delegates all public `Optimizer` methods to the inner
<del> optimizer. Additionally, in methods `minimize` and `get_gradients`, it scales
<del> the loss and unscales the gradients. In methods `minimize` and
<add> optimizer. Additionally, in methods `minimize` and `get_gradients`, it
<add> scales the loss and unscales the gradients. In methods `minimize` and
<ide> `apply_gradients`, it additionally updates the loss scale and skips applying
<ide> gradients if any gradient has a nonfinite value.
<ide>
<ide> ### Hyperparameters
<ide>
<del> If wrapping a `tf.keras.optimizers.Optimizer`, hyperparameters can be accessed
<del> and set on the LossScaleOptimizer, which will be delegated to the wrapped
<del> optimizer.
<add> If wrapping a `tf.keras.optimizers.Optimizer`, hyperparameters can be
<add> accessed and set on the LossScaleOptimizer, which will be delegated to the
<add> wrapped optimizer.
<ide>
<ide> >>> opt = tf.keras.optimizers.Adam(beta_1=0.8, epsilon=1e-5)
<ide> >>> opt = tf.keras.mixed_precision.LossScaleOptimizer(opt)
<ide> class BaseLossScaleOptimizer(metaclass=LossScaleOptimizerMetaclass):
<ide> >>> opt.inner_optimizer.epsilon
<ide> >>> 1e-5
<ide>
<del> In the above example, despite epsilon being set on the LossScaleOptimizer, the
<del> old epsilon value will still be used when training as epsilon was not set on
<del> the inner optimizer.
<add> In the above example, despite epsilon being set on the LossScaleOptimizer,
<add> the old epsilon value will still be used when training as epsilon was not
<add> set on the inner optimizer.
<ide> """
<ide>
<ide> @property
<ide> def loss_scale(self):
<ide>
<ide> @property
<ide> def dynamic_counter(self):
<del> """The number of steps since the loss scale was last increased or decreased.
<add> """The number of steps since the loss scale was last increased or
<add> decreased.
<ide>
<ide> This is None if `LossScaleOptimizer.dynamic` is False.
<ide>
<ide> The counter is incremented every step. Once it reaches
<del> `LossScaleOptimizer.dynamic_growth_steps`, the loss scale will be doubled
<del> and the counter will be reset back to zero. If nonfinite gradients are
<del> encountered, the loss scale will be halved and the counter will be reset
<del> back to zero.
<add> `LossScaleOptimizer.dynamic_growth_steps`, the loss scale will be
<add> doubled and the counter will be reset back to zero. If nonfinite
<add> gradients are encountered, the loss scale will be halved and the counter
<add> will be reset back to zero.
<ide> """
<ide> raise NotImplementedError
<ide>
<ide> def dynamic_growth_steps(self):
<ide>
<ide> This is None if `LossScaleOptimizer.dynamic` is False.
<ide>
<del> Every `dynamic_growth_steps` consecutive steps with finite gradients, the
<del> loss scale is increased.
<add> Every `dynamic_growth_steps` consecutive steps with finite gradients,
<add> the loss scale is increased.
<ide> """
<ide> raise NotImplementedError
<ide>
<ide> def get_scaled_loss(self, loss):
<ide> """Scales the loss by the loss scale.
<ide>
<ide> This method is only needed if you compute gradients manually, e.g. with
<del> `tf.GradientTape`. In that case, call this method to scale the loss before
<del> passing the loss to `tf.GradientTape`. If you use
<del> `LossScaleOptimizer.minimize` or `LossScaleOptimizer.get_gradients`, loss
<del> scaling is automatically applied and this method is unneeded.
<add> `tf.GradientTape`. In that case, call this method to scale the loss
<add> before passing the loss to `tf.GradientTape`. If you use
<add> `LossScaleOptimizer.minimize` or `LossScaleOptimizer.get_gradients`,
<add> loss scaling is automatically applied and this method is unneeded.
<ide>
<del> If this method is called, `get_unscaled_gradients` should also be called.
<del> See the `tf.keras.mixed_precision.LossScaleOptimizer` doc for
<add> If this method is called, `get_unscaled_gradients` should also be
<add> called. See the `tf.keras.mixed_precision.LossScaleOptimizer` doc for
<ide> an example.
<ide>
<ide> Args:
<del> loss: The loss, which will be multiplied by the loss scale. Can either be
<del> a tensor or a callable returning a tensor.
<add> loss: The loss, which will be multiplied by the loss scale. Can either
<add> be a tensor or a callable returning a tensor.
<ide>
<ide> Returns:
<ide> `loss` multiplied by `LossScaleOptimizer.loss_scale`.
<ide> def get_unscaled_gradients(self, grads):
<ide> """Unscales the gradients by the loss scale.
<ide>
<ide> This method is only needed if you compute gradients manually, e.g. with
<del> `tf.GradientTape`. In that case, call this method to unscale the gradients
<del> after computing them with `tf.GradientTape`. If you use
<del> `LossScaleOptimizer.minimize` or `LossScaleOptimizer.get_gradients`, loss
<del> scaling is automatically applied and this method is unneeded.
<add> `tf.GradientTape`. In that case, call this method to unscale the
<add> gradients after computing them with `tf.GradientTape`. If you use
<add> `LossScaleOptimizer.minimize` or `LossScaleOptimizer.get_gradients`,
<add> loss scaling is automatically applied and this method is unneeded.
<ide>
<ide> If this method is called, `get_scaled_loss` should also be called. See
<ide> the `tf.keras.mixed_precision.LossScaleOptimizer` doc for an
<ide> example.
<ide>
<ide> Args:
<del> grads: A list of tensors, each which will be divided by the loss scale.
<del> Can have None values, which are ignored.
<add> grads: A list of tensors, each which will be divided by the loss
<add> scale. Can have None values, which are ignored.
<ide>
<ide> Returns:
<del> A new list the same size as `grads`, where every non-None value in `grads`
<del> is divided by `LossScaleOptimizer.loss_scale`.
<add> A new list the same size as `grads`, where every non-None value in
<add> `grads` is divided by `LossScaleOptimizer.loss_scale`.
<ide> """
<ide> # Calls to this function would be delegated to `get_unscaled_gradients`
<ide> # of either `LossScaleOptimizer` or `LossScaleOptimizerV3`, depending on
<ide> def __init__(
<ide> ):
<ide> if not isinstance(inner_optimizer, optimizer_v2.OptimizerV2):
<ide> if isinstance(inner_optimizer, optimizer_experimental.Optimizer):
<del> # Give better error message if the new experimental optimizer is passed.
<add> # Give better error message if the new experimental optimizer is
<add> # passed.
<ide> raise TypeError(
<del> f"You passed an instance of the new experimental optimizer, "
<del> f"`optimizer_experimental.Optimizer`, to LossScaleOptimizer, but "
<add> f"You passed an instance of the new experimental "
<add> f"optimizer, `optimizer_experimental.Optimizer`, "
<add> f"to LossScaleOptimizer, but "
<ide> f"only the classic optimizers subclassing from "
<del> f"`tf.keras.optimizers.Optimizer` can be passed. Please use "
<del> f"`loss_scale_optimizer.LossScaleOptimizerV3` instead of "
<del> f"`tf.keras.mixed_precision.LossScaleOptimizer`, as the former "
<del> f"supports wrapping instances of the new experimental optimizer. "
<add> f"`tf.keras.optimizers.Optimizer` can be passed. Please "
<add> f"use `loss_scale_optimizer.LossScaleOptimizerV3` "
<add> f"instead of "
<add> f"`tf.keras.mixed_precision.LossScaleOptimizer`, "
<add> f"as the former supports wrapping "
<add> f"instances of the new experimental optimizer. "
<ide> f"Got optimizer: {inner_optimizer}"
<ide> )
<ide> msg = (
<ide> def __init__(
<ide> msg += (
<ide> 'Please make sure "inner_optimizer" is not an instance of '
<ide> "`tensorflow.python.keras.optimizers`, which is "
<del> "the legacy keras code and will be removed in future release. "
<del> "Please use the tf.keras public API instead."
<add> "the legacy keras code and will be removed in future "
<add> "release. Please use the tf.keras public API instead."
<ide> )
<ide> raise TypeError(msg)
<ide> if not isinstance(dynamic, bool):
<ide> # Catch errors if a user incorrectly passes a string or float to the
<del> # second argument argument, as this was commonly done for the now-removed
<del> # LossScaleOptimizerV1.
<add>             # second argument, as this was commonly done for the
<add> # now-removed LossScaleOptimizerV1.
<ide> raise TypeError(
<ide> '"dynamic" argument to LossScaleOptimizer.__init__ must '
<ide> "be a bool, but got: %r" % (dynamic,)
<ide> def __init__(
<ide> if getattr(
<ide> inner_optimizer, "_is_wrapped_by_loss_scale_optimizer", False
<ide> ):
<del> # TODO(reedwm): Maybe support this. The difficulty is that LSO has the
<del> # same checkpoint format as the inner optimizer, so multiple LSOs wrapping
<del> # the same optimizer causes the checkpointing logic to become confused.
<add> # TODO(reedwm): Maybe support this. The difficulty is that LSO has
<add> # the same checkpoint format as the inner optimizer, so multiple
<add> # LSOs wrapping the same optimizer causes the checkpointing logic to
<add> # become confused.
<ide> raise ValueError(
<ide> '"inner_optimizer" is already wrapped by a '
<ide> "LossScaleOptimizer. An optimizer can only be wrapped "
<ide> def __init__(
<ide> self._optimizer = inner_optimizer
<ide> self._optimizer._is_wrapped_by_loss_scale_optimizer = True
<ide>
<del> # We don't call super().__init__, since we do not want to call OptimizerV2's
<del> # constructor.
<add> # We don't call super().__init__, since we do not want to call
<add> # OptimizerV2's constructor.
<ide> tf.__internal__.tracking.DelegatingTrackableMixin.__init__(
<ide> self, self._optimizer
<ide> )
<ide> def __init__(
<ide> "is False, but got: %s" % (dynamic_growth_steps,)
<ide> )
<ide>
<del> # Used to track whether get_scaled_loss() and get_unscaled_gradients() have
<del> # been called
<add> # Used to track whether get_scaled_loss() and get_unscaled_gradients()
<add> # have been called
<ide> self._loss_has_been_scaled = False
<ide> self._gradients_have_been_unscaled = False
<ide>
<ide> def _compute_gradients(self, loss, var_list, grad_loss=None, tape=None):
<ide> tape = tf.GradientTape() if tape is None else tape
<ide> with tape:
<ide> loss = self.get_scaled_loss(loss)
<del> grads_and_vars = self._optimizer._compute_gradients( # pylint: disable=protected-access
<add> grads_and_vars = self._optimizer._compute_gradients(
<ide> loss, var_list, grad_loss, tape=tape
<ide> )
<ide> grads = [g for g, _ in grads_and_vars]
<ide> def apply_gradients(
<ide> raise ValueError(
<ide> "apply_gradients() must be called in a replica context."
<ide> )
<del> # We check for the strategy here despite already checking in the constructor
<del> # as frequently the optimizer is created outside the strategy's scope.
<add> # We check for the strategy here despite already checking in the
<add> # constructor as frequently the optimizer is created outside the
<add> # strategy's scope.
<ide> _raise_if_strategy_unsupported()
<ide> _maybe_warn_about_scaling(
<ide> self._loss_has_been_scaled, self._gradients_have_been_unscaled
<ide> def apply_gradients(
<ide> grads_and_vars = optimizer_utils.filter_empty_gradients(grads_and_vars)
<ide> if experimental_aggregate_gradients:
<ide> # We must aggregate the gradients here instead of in
<del> # self.optimizer.apply_gradients, so that any NaN or Inf gradients are
<del> # propagated to each replica. If any replica has a NaN or Inf gradient,
<del> # they must all have a NaN or Inf gradient so that they all skip the step.
<del> # pylint: disable=protected-access
<add> # self.optimizer.apply_gradients, so that any NaN or Inf gradients
<add> # are propagated to each replica. If any replica has a NaN or Inf
<add> # gradient, they must all have a NaN or Inf gradient so that they
<add> # all skip the step.
<ide> grads_and_vars = self._optimizer._transform_unaggregated_gradients(
<ide> grads_and_vars
<ide> )
<ide> def apply_gradients(
<ide>
<ide> def do_not_apply_fn():
<ide> # Normally self._optimizer.iterations is incremented in
<del> # self._optimizer.apply_gradients(). Since that is not called in this
<del> # branch, we increment it here instead.
<add> # self._optimizer.apply_gradients(). Since that is not called in
<add> # this branch, we increment it here instead.
<ide> return self._optimizer.iterations.assign_add(1, read_value=False)
<ide>
<ide> def _if_should_apply_grads(grads):
<ide> def apply_fn():
<ide> )
<ide>
<ide> # Note: We must call this cond() in a cross-replica context.
<del> # DistributionStrategy does not support having a cond in a replica
<del> # context with a branch that calls `merge_call`, and
<add> # DistributionStrategy does not support having a cond in a
<add> # replica context with a branch that calls `merge_call`, and
<ide> # self._optimizer.apply_gradients calls `merge_call`.
<ide> maybe_apply_op = tf.__internal__.smart_cond.smart_cond(
<ide> should_apply_grads, apply_fn, do_not_apply_fn
<ide> def from_config(cls, config, custom_objects=None):
<ide> config = config.copy() # Make a copy, since we mutate config
<ide> if "loss_scale" in config:
<ide> # If loss_scale is in config, we assume we are deserializing a
<del> # LossScaleOptimizer from TF 2.3 or below. We convert the config so it
<del> # can be deserialized in the current LossScaleOptimizer.
<add> # LossScaleOptimizer from TF 2.3 or below. We convert the config so
<add> # it can be deserialized in the current LossScaleOptimizer.
<ide> loss_scale = generic_utils.deserialize_keras_object(
<ide> config.pop("loss_scale"),
<ide> module_objects={
<ide> def from_config(cls, config, custom_objects=None):
<ide> )
<ide> else:
<ide> raise ValueError(
<del> "Serialized LossScaleOptimizers with a LossScale that is neither a "
<del> "FixedLossScale nor a DynamicLossScale can no longer be "
<del> "deserialized"
<add> "Serialized LossScaleOptimizers with a LossScale that is "
<add> "neither a FixedLossScale nor a DynamicLossScale can no "
<add> "longer be deserialized"
<ide> )
<ide> config["inner_optimizer"] = config.pop("optimizer")
<ide> inner_optimizer = optimizers.deserialize(
<ide> def _restore_slot_variable(self, slot_name, variable, slot_variable):
<ide> def _create_or_restore_slot_variable(
<ide> self, slot_variable_position, slot_name, variable
<ide> ):
<del> return self._optimizer._create_or_restore_slot_variable( # pylint: disable=protected-access
<add> return self._optimizer._create_or_restore_slot_variable(
<ide> slot_variable_position, slot_name, variable
<ide> )
<ide>
<ide> def __dir__(self):
<ide> def __setattr__(self, name, value):
<ide> if name == "lr":
<ide> name = "learning_rate"
<del> # Delegate setting hyperparameter to inner optimizer if the attribute does
<del> # not exist on the LossScaleOptimizer
<add> # Delegate setting hyperparameter to inner optimizer if the attribute
<add> # does not exist on the LossScaleOptimizer
<ide> try:
<del> # We cannot check for the 'iterations' attribute as it cannot be set after
<del> # it is accessed.
<add> # We cannot check for the 'iterations' attribute as it cannot be set
<add> # after it is accessed.
<ide> if name != "iterations":
<ide> object.__getattribute__(self, name)
<ide> has_attribute = True
<ide> def __setattr__(self, name, value):
<ide> else:
<ide> super().__setattr__(name, value)
<ide>
<del> # Explicitly delegate learning_rate. Normally hyperparameters are delegated in
<del> # __getattribute__, but if a hyperparameter is not in self._optimizer._hyper
<del> # (e.g. because self._optimizer itself wraps another optimizer), then it won't
<del> # be delegated. Since learning_rate is a very commonly accessed
<del> # hyperparameter, we delegate it here.
<add> # Explicitly delegate learning_rate. Normally hyperparameters are delegated
<add> # in __getattribute__, but if a hyperparameter is not in
<add> # self._optimizer._hyper (e.g. because self._optimizer itself wraps another
<add> # optimizer), then it won't be delegated. Since learning_rate is a very
<add> # commonly accessed hyperparameter, we delegate it here.
<ide> @property
<ide> def learning_rate(self):
<ide> return self._optimizer.learning_rate
<ide> def lr(self):
<ide> def lr(self, value):
<ide> self._optimizer.lr = value
<ide>
<del> # We do not override some OptimizerV2 methods. For each, we describe why we do
<del> # not delegate them to self._optimizer:
<add> # We do not override some OptimizerV2 methods. For each, we describe why we
<add> # do not delegate them to self._optimizer:
<ide> # * get_updates: get_updates() calls get_gradients(). Since we override
<ide> # get_gradients(), we cannot delegate get_updates() to self._optimizer,
<ide> # otherwise the overridden get_gradients() method would not be called.
<ide> class LossScaleOptimizerV3(
<ide> class instead of the `tf.keras.optimizers.Optimizer` class. Some of the
<ide> methods this class defines and calls are different compared to
<ide> LossScaleOptimizer due to the differences between the two Optimizer base
<del> classes. Additionally, this class does not support the legacy graph mode, but
<del> LossScaleOptimizer does.
<add> classes. Additionally, this class does not support the legacy graph mode,
<add> but LossScaleOptimizer does.
<ide>
<ide> Since the new experimental Optimizer does not have a hyperparameter concept,
<ide> LossScaleOptimizerV3 does not delegate arbitrary hyperparameter accesses to
<ide> def __init__(
<ide> ):
<ide> if not isinstance(inner_optimizer, optimizer_experimental.Optimizer):
<ide> if isinstance(inner_optimizer, optimizer_v2.OptimizerV2):
<del> # Give better error message if the OptimizerV2 class is passed instead
<del> # of the new experimental optimizer.
<add> # Give better error message if the OptimizerV2 class is passed
<add> # instead of the new experimental optimizer.
<ide> raise TypeError(
<ide> f"You passed a `tf.keras.optimizer.Optimizer` instance to "
<del> f"LossScaleOptimizerV3, but only the new experimental optimizer "
<del> f"defined in keras/optimizer_expeirmental/optimizer.py can be "
<del> f"passed. Please use `tf.keras.mixed_precision.LossScaleOptimizer` "
<add> f"LossScaleOptimizerV3, but only the new experimental "
<add> f"optimizer defined in "
<add>                     f"keras/optimizer_experimental/optimizer.py can be "
<add> f"passed. Please use "
<add> f"`tf.keras.mixed_precision.LossScaleOptimizer` "
<ide> f"instead of LossScaleOptimizerV3, as the former supports "
<ide> f"`tf.keras.optimizer.Optimizer`s. Got optimizer: "
<ide> f"{inner_optimizer}"
<ide> def __init__(
<ide> )
<ide> if not isinstance(dynamic, bool):
<ide> # Catch errors if a user incorrectly passes a string or float to the
<del> # second argument argument, as this was commonly done for the now-removed
<del> # LossScaleOptimizerV1.
<add>             # second argument, as this was commonly done for the
<add> # now-removed LossScaleOptimizerV1.
<ide> raise TypeError(
<ide> f'"dynamic" argument to LossScaleOptimizer.__init__ must '
<ide> f"be a bool, but got: {repr(dynamic)}"
<ide> def __init__(
<ide> if getattr(
<ide> inner_optimizer, "_is_wrapped_by_loss_scale_optimizer", False
<ide> ):
<del> # TODO(reedwm): Maybe support this. The difficulty is that LSO has the
<del> # same checkpoint format as the inner optimizer, so multiple LSOs wrapping
<del> # the same optimizer causes the checkpointing logic to become confused.
<add> # TODO(reedwm): Maybe support this. The difficulty is that LSO has
<add> # the same checkpoint format as the inner optimizer, so multiple
<add> # LSOs wrapping the same optimizer causes the checkpointing logic to
<add> # become confused.
<ide> raise ValueError(
<ide> '"inner_optimizer" is already wrapped by a '
<ide> "LossScaleOptimizer. An optimizer can only be wrapped "
<ide> def __init__(
<ide> self._optimizer = inner_optimizer
<ide> self._optimizer._is_wrapped_by_loss_scale_optimizer = True
<ide>
<del> # We don't call super().__init__, since we do not want to call Optimizer's
<del> # constructor.
<add> # We don't call super().__init__, since we do not want to call
<add> # Optimizer's constructor.
<ide> tf.__internal__.tracking.DelegatingTrackableMixin.__init__(
<ide> self, self._optimizer
<ide> )
<ide> def __init__(
<ide> f"is False, but got: {dynamic_growth_steps}"
<ide> )
<ide>
<del> # Used to track whether get_scaled_loss() and get_unscaled_gradients() have
<del> # been called
<add> # Used to track whether get_scaled_loss() and get_unscaled_gradients()
<add> # have been called
<ide> self._loss_has_been_scaled = False
<ide> self._gradients_have_been_unscaled = False
<ide>
<ide> def compute_gradients(self, loss, var_list, tape=None):
<ide> tape = tf.GradientTape() if tape is None else tape
<ide> with tape:
<ide> loss = self.get_scaled_loss(loss)
<del> grads_and_vars = self._optimizer.compute_gradients( # pylint: disable=protected-access
<add> grads_and_vars = self._optimizer.compute_gradients(
<ide> loss, var_list, tape=tape
<ide> )
<ide> grads = [g for g, _ in grads_and_vars]
<ide> def apply_gradients(self, grads_and_vars, skip_gradients_aggregation=False):
<ide> raise ValueError(
<ide> "apply_gradients() must be called in a replica context."
<ide> )
<del> # We check for the strategy here despite already checking in the constructor
<del> # as frequently the optimizer is created outside the strategy's scope.
<add> # We check for the strategy here despite already checking in the
<add> # constructor as frequently the optimizer is created outside the
<add> # strategy's scope.
<ide> _raise_if_strategy_unsupported()
<ide> _maybe_warn_about_scaling(
<ide> self._loss_has_been_scaled, self._gradients_have_been_unscaled
<ide> def apply_gradients(self, grads_and_vars, skip_gradients_aggregation=False):
<ide> grads_and_vars = optimizer_utils.filter_empty_gradients(grads_and_vars)
<ide> if not skip_gradients_aggregation:
<ide> # We must aggregate the gradients here instead of in
<del> # self.optimizer.apply_gradients, so that any NaN or Inf gradients are
<del> # propagated to each replica. If any replica has a NaN or Inf gradient,
<del> # they must all have a NaN or Inf gradient so that they all skip the step.
<del> # pylint: disable=protected-access
<add> # self.optimizer.apply_gradients, so that any NaN or Inf gradients
<add> # are propagated to each replica. If any replica has a NaN or Inf
<add> # gradient, they must all have a NaN or Inf gradient so that they
<add> # all skip the step.
<ide> grads_and_vars = self._optimizer.aggregate_gradients(grads_and_vars)
<del> # pylint: enable=protected-access
<ide>
<ide> grads_and_vars = tuple(grads_and_vars)
<ide> grads = [g for g, _ in grads_and_vars]
<ide> def apply_gradients(self, grads_and_vars, skip_gradients_aggregation=False):
<ide>
<ide> def do_not_apply_fn():
<ide> # Normally self._optimizer.iterations is incremented in
<del> # self._optimizer.apply_gradients(). Since that is not called in this
<del> # branch, we increment it here instead.
<add> # self._optimizer.apply_gradients(). Since that is not called in
<add> # this branch, we increment it here instead.
<ide> self._optimizer.iterations.assign_add(1, read_value=False)
<ide>
<ide> def _if_should_apply_grads(grads):
<ide> def apply_fn():
<ide> )
<ide>
<ide> # Note: We must call this cond() in a cross-replica context.
<del> # DistributionStrategy does not support having a cond in a replica
<del> # context with a branch that calls `merge_call`, and
<add> # DistributionStrategy does not support having a cond in a
<add> # replica context with a branch that calls `merge_call`, and
<ide> # self._optimizer.apply_gradients calls `merge_call`.
<ide> tf.__internal__.smart_cond.smart_cond(
<ide> should_apply_grads, apply_fn, do_not_apply_fn
<ide> def learning_rate(self, learning_rate):
<ide> class FakeOptimizerForRestoration(tf.__internal__.tracking.Trackable):
<ide> """A fake optimizer used to support restoring TensorFlow 2.2 checkpoints.
<ide>
<del> The checkpoint format for LossScaleOptimizers changed after TF 2.2. This class
<del> exists to support restoring TF 2.2 checkpoints in newer version of TensorFlow.
<add> The checkpoint format for LossScaleOptimizers changed after TF 2.2. This
<add>     class exists to support restoring TF 2.2 checkpoints in newer versions of
<add> TensorFlow.
<ide>
<del> In TF 2.2, LossScaleOptimizer would track the wrapped optimizer by calling the
<del> following in LossScaleOptimizer.__init__
<add> In TF 2.2, LossScaleOptimizer would track the wrapped optimizer by calling
<add> the following in LossScaleOptimizer.__init__
<ide>
<ide> ```
<ide> self._track_trackable(self._optimizer, 'base_optimizer')
<ide> class FakeOptimizerForRestoration(tf.__internal__.tracking.Trackable):
<ide> LossScaleOptimizer is the same as the format without a LossScaleOptimizer,
<ide> except the loss scale is also stored. This means there is no dependency from
<ide> the LossScaleOptimizer to the wrapped optimizer. Instead, the
<del> LossScaleOptimizer acts as if it is the wrapped optimizer, from a checkpoint's
<del> perspective, by overriding all Trackable methods and delegating them to the
<del> wrapped optimizer.
<add> LossScaleOptimizer acts as if it is the wrapped optimizer, from a
<add> checkpoint's perspective, by overriding all Trackable methods and delegating
<add> them to the wrapped optimizer.
<ide>
<ide> To allow restoring TF 2.2. checkpoints, LossScaleOptimizer adds a dependency
<ide> on this class instead of the inner optimizer. When restored, this class will
<ide> def get_slot_names(self):
<ide> def _create_or_restore_slot_variable(
<ide> self, slot_variable_position, slot_name, variable
<ide> ):
<del> return self._optimizer._create_or_restore_slot_variable( # pylint: disable=protected-access
<add> return self._optimizer._create_or_restore_slot_variable(
<ide> slot_variable_position, slot_name, variable
<ide> )
<ide>
<ide> def _create_loss_scale_optimizer_from_v1_loss_scale(optimizer, loss_scale):
<ide> """Creates an LSO from a tf.compat.v1.mixed_precision.LossScale.
<ide>
<ide> This is only used to pass to
<del> `tf.__internal__.mixed_precision.register_loss_scale_wrapper` below, which is
<del> called so that
<add> `tf.__internal__.mixed_precision.register_loss_scale_wrapper` below, which
<add> is called so that
<ide> `tf.compat.v1.mixed_precision.enable_mixed_precision_graph_rewrite` can
<ide> wrap a Keras optimizer with a LossScaleOptimizer.
<ide>
<ide> def strategy_supports_loss_scaling():
<ide> if not tf.distribute.has_strategy():
<ide> return True
<ide> strategy = tf.distribute.get_strategy()
<del> # Strategies are supported if either there is only one replica or if variables
<del> # are replicated per device. Otherwise, the current model.fit() implementation
<del> # and most custom training loops incorrectly unscale the gradients. Currently,
<del> # gradients are unscaled once per compute replica, but they should be unscaled
<del> # once per variable replica. When there is one variable replica for each
<del> # compute replica, this works fine, but otherwise issues will occur.
<add> # Strategies are supported if either there is only one replica or if
<add> # variables are replicated per device. Otherwise, the current model.fit()
<add> # implementation and most custom training loops incorrectly unscale the
<add> # gradients. Currently, gradients are unscaled once per compute replica, but
<add> # they should be unscaled once per variable replica. When there is one
<add> # variable replica for each compute replica, this works fine, but otherwise
<add> # issues will occur.
<ide> # TODO(reedwm): Support all strategies.
<ide> return isinstance(
<ide> strategy,
<ide> def strategy_supports_loss_scaling():
<ide>
<ide>
<ide> def _raise_if_strategy_unsupported():
<del> """Raise an exception if the current strategy doesn't support loss scaling."""
<add> """Raise an exception if the current strategy doesn't support loss
<add> scaling."""
<ide> if not strategy_supports_loss_scaling():
<ide> strategy = tf.distribute.get_strategy()
<ide> if isinstance(
<ide> def _raise_if_strategy_unsupported():
<ide> ),
<ide> ):
<ide> raise ValueError(
<del> "Loss scaling is not supported with TPUStrategy. Loss scaling is "
<del> "unnecessary with TPUs, since they support bfloat16 instead of "
<del> "float16 and bfloat16 does not require loss scaling. You should "
<del> "remove the use of the LossScaleOptimizer when TPUs are used."
<add> "Loss scaling is not supported with TPUStrategy. Loss scaling "
<add> "is unnecessary with TPUs, since they support bfloat16 instead "
<add> "of float16 and bfloat16 does not require loss scaling. You "
<add> "should remove the use of the LossScaleOptimizer when TPUs are "
<add> "used."
<ide> )
<ide> else:
<ide> raise ValueError(
<ide><path>keras/mixed_precision/loss_scale_optimizer_test.py
<ide> def opt_and_strategy_and_mode_combinations():
<ide> """Returns combinations for running with multiple optimizers and strategies.
<ide>
<ide> Returns:
<del> Combinations that run with both OptimizerV2 and the experimental optimizer;
<del> and with the default strategy and mirrored strategy; and in both graph and
<del> eager mode.
<add> Combinations that run with both OptimizerV2 and the experimental
<add> optimizer; and with the default strategy and mirrored strategy; and in
<add> both graph and eager mode.
<ide> """
<ide> # For the experimental optimizer, don't use graph mode directly since it's
<ide> # unsupported. Instead, run both without and with a tf.function, in order to
<ide> def opt_combinations_only():
<ide> @tf_test_utils.with_control_flow_v2
<ide> class LossScaleOptimizerTest(tf.test.TestCase, parameterized.TestCase):
<ide> def _run_if_in_graph_mode(self, val):
<del> # Running only in graph mode is useful, because optimizers sometimes return
<del> # a value that, in Graph mode, is runnable with self.evaluate. But in Eager
<del> # mode, the optimizer already does the computations and the return value
<del> # cannot be run.
<add> # Running only in graph mode is useful, because optimizers sometimes
<add> # return a value that, in Graph mode, is runnable with self.evaluate.
<add> # But in Eager mode, the optimizer already does the computations and the
<add> # return value cannot be run.
<ide> if not tf.executing_eagerly():
<ide> self.evaluate(val)
<ide>
<ide> def _eval_if_tensor(self, val):
<del> # Calls self.evaluate on val if val is a Tensor or Variable. This is useful,
<del> # since hyperparameters are tf.Variables on OptimizerV2 and are Python
<del> # floats on the experimental optimizer.
<add> # Calls self.evaluate on val if val is a Tensor or Variable. This is
<add> # useful, since hyperparameters are tf.Variables on OptimizerV2 and are
<add> # Python floats on the experimental optimizer.
<ide> return (
<ide> self.evaluate(val)
<ide> if isinstance(val, (tf.Tensor, tf.Variable))
<ide> def testFixedLossScaleAppliedToLossWithMinimize(
<ide> opt = create_lso(opt, dynamic=False, initial_scale=loss_scale)
<ide> self.assertEqual(self.evaluate(opt.loss_scale), loss_scale)
<ide> self.assertIsInstance(opt.loss_scale, tf.Tensor)
<del> # We need num_replicas_in_sync to divide loss_scale, otherwise loss_scale
<del> # / strategy.num_replicas_in_sync will not be exact, which could lead to
<del> # assertion failures due to rounding issues.
<add> # We need num_replicas_in_sync to divide loss_scale, otherwise
<add> # loss_scale / strategy.num_replicas_in_sync will not be exact,
<add> # which could lead to assertion failures due to rounding issues.
<ide> self.assertEqual(loss_scale % strategy.num_replicas_in_sync, 0)
<ide> run_fn = self._run_fn_with_grad_check(
<ide> strategy, var, opt, loss_scale / strategy.num_replicas_in_sync
<ide> def testFixedLossScaleAppliedToLossWithMinimize(
<ide> run_op = strategy.experimental_run(run_fn)
<ide> self.evaluate(tf.compat.v1.global_variables_initializer())
<ide> self._run_if_in_graph_mode(run_op)
<del> # The loss is the identity of the variable. Therefore the gradient is 1,
<del> # and so the variable will be init_val - grad * lr == 5 - 1 * 2 == 3
<add> # The loss is the identity of the variable. Therefore the gradient
<add> # is 1, and so the variable will be init_val - grad * lr == 5 - 1 *
<add> # 2 == 3
<ide> self.assertAllClose([3.0], self.evaluate(var))
<ide>
<ide> def testFixedLossScaleAppliedToLossWithGetGradients(self):
<ide> def testFixedLossScaleAppliedToLossWithGetGradients(self):
<ide> run_op = opt.get_gradients(loss, [var])
<ide> self.evaluate(tf.compat.v1.global_variables_initializer())
<ide> # This will cause an assertion to run, as
<del> # mp_test_util.create_identity_with_grad_check_fn added an assertion op.
<add> # mp_test_util.create_identity_with_grad_check_fn added an assertion
<add> # op.
<ide> self.evaluate(run_op)
<ide>
<ide> @test_combinations.generate(opt_combinations_only())
<ide> def testDynamicLossScale(self, opt_cls, strategy_fn, use_tf_function):
<ide> run_op = strategy.experimental_run(run_fn)
<ide> self.evaluate(tf.compat.v1.global_variables_initializer())
<ide> self._run_if_in_graph_mode(run_op)
<del> # The loss is the identity of the variable. Therefore the gradient is 1,
<del> # and so the variable will be init_val - grad * lr == 5 - 1 * 2 == 3
<add> # The loss is the identity of the variable. Therefore the gradient
<add> # is 1, and so the variable will be init_val - grad * lr == 5 - 1 *
<add> # 2 == 3
<ide> self.assertAllClose([3.0], self.evaluate(var))
<ide>
<del> # Loss scale will be double, so the expected gradient is also doubled.
<add> # Loss scale will be double, so the expected gradient is also
<add> # doubled.
<ide> self.evaluate(
<ide> expected_gradient.assign(
<ide> 2 * learning_rate / strategy.num_replicas_in_sync
<ide> )
<ide> )
<ide> run_op = strategy.experimental_run(run_fn)
<ide> self._run_if_in_graph_mode(run_op)
<del> # As before, the 2 is subtracted from the variable, making it's new value
<del> # 1.
<add>            # As before, the 2 is subtracted from the variable, making its new
<add> # value 1.
<ide> self.assertAllClose([1.0], self.evaluate(var))
<ide>
<ide> @test_combinations.generate(opt_combinations_only())
<ide> def testClipping(self, opt_cls, strategy_fn, use_tf_function):
<ide> self.assertEqual(self.evaluate(opt.loss_scale), 4)
<ide>
<ide> if isinstance(opt, loss_scale_optimizer.LossScaleOptimizerV3):
<del> # Only OptimizerV2 exposes the clipping attributes, so we cannot set
<del> # them on the new optimizer
<add> # Only OptimizerV2 exposes the clipping attributes, so we
<add> # cannot set them on the new optimizer
<ide> return
<ide> # Test changing the clip amount and running again
<ide> setattr(opt, clip_type, 3.0)
<ide> def loss():
<ide> run_op = strategy.experimental_run(run_fn)
<ide> self.evaluate(tf.compat.v1.global_variables_initializer())
<ide> self._run_if_in_graph_mode(run_op)
<del> # The loss is the identity of the variable. Therefore the gradient is 1,
<del> # and so the variable will be init_val - grad * lr == 5 - 1 * 2 == 3
<add> # The loss is the identity of the variable. Therefore the gradient
<add> # is 1, and so the variable will be init_val - grad * lr == 5 - 1 *
<add> # 2 == 3
<ide> self.assertAllClose([3.0], self.evaluate(var))
<ide>
<ide> @test_combinations.generate(opt_and_strategy_and_mode_combinations())
<ide> def loss():
<ide>
<ide> def testCustomAggregater(self):
<ide> def gradient_aggregator(grads_and_vars):
<del> # Simulate an all-reduce where a replica has a NaN gradient by setting
<del> # the last gradient to NaN
<add> # Simulate an all-reduce where a replica has a NaN gradient by
<add> # setting the last gradient to NaN
<ide> grads_and_vars = list(grads_and_vars)
<ide> last_grad, last_var = grads_and_vars[-1]
<ide> grads_and_vars[-1] = (last_grad * float("NaN"), last_var)
<ide> def testDynamicLossScaleWithSlots(
<ide> self.evaluate(tf.compat.v1.global_variables_initializer())
<ide> self._run_if_in_graph_mode(run_op)
<ide> # The momentum accumulator starts at 0 and the gradient is 1. The
<del> # accumulator is incremented by the gradient, so it is now 1. Then the
<del> # variable is subtracted by the accumulator, so the variable is subtracted
<del> # by 1.
<add> # accumulator is incremented by the gradient, so it is now 1. Then
<add> # the variable is subtracted by the accumulator, so the variable is
<add> # subtracted by 1.
<ide> self.assertAllClose([0.0, 1.0], self.evaluate(var))
<ide> self.assertEqual(self.evaluate(opt.loss_scale), initial_scale * 2)
<ide>
<ide> run_op = strategy.experimental_run(run_fn)
<ide> self._run_if_in_graph_mode(run_op)
<del> # The momentum accumulator was 1 before this step and the gradient is 1.
<del> # The accumulator is incremented by the gradient, so it is now 2. Then the
<del> # variable is subtracted by the accumulator, so the variable is subtracted
<del> # by 2.
<add> # The momentum accumulator was 1 before this step and the gradient
<add> # is 1. The accumulator is incremented by the gradient, so it is
<add> # now 2. Then the variable is subtracted by the accumulator, so the
<add> # variable is subtracted by 2.
<ide> self.assertAllClose([-2.0, -1.0], self.evaluate(var))
<ide> self.assertEqual(self.evaluate(opt.loss_scale), initial_scale * 4)
<ide>
<ide> def testIterationsIncremented(self, opt_cls, strategy_fn, use_tf_function):
<ide> ) # Grad is 2, so var is 5 - 2
<ide> self.assertEqual(self.evaluate(opt.iterations), 1)
<ide>
<del> # Test iterations is incremented in opt.minimize even if gradients aren't
<del> # applied to variables due to NaN gradients.
<add> # Test iterations is incremented in opt.minimize even if gradients
<add> # aren't applied to variables due to NaN gradients.
<ide> loss = lambda: var * float("NaN")
<ide> run_fn = lambda: opt.minimize(loss, [var])
<ide> if use_tf_function:
<ide> def testHyperParametersExposed(self):
<ide> self.assertEqual(self.evaluate(opt.learning_rate), 2.0)
<ide> self.assertIs(lso.lr, opt.lr)
<ide>
<del> # Test setting attribute that is both attribute on LossScaleOptimizer and
<del> # hyperparameter on wrapped optimizer.
<add> # Test setting attribute that is both attribute on
<add> # LossScaleOptimizer and hyperparameter on wrapped optimizer.
<ide> class MyOpt(gradient_descent.SGD):
<ide> def __init__(self):
<ide> super().__init__()
<ide> def testDir(self):
<ide> def testApplyGradientsGetsUnwrappedTensors(self):
<ide> # Tests that gradients passed to apply_gradients are not wrapped in a
<ide> # DistributionStrategy wrapper, such as PerReplica, but instead are raw
<del> # Tensors. Optimizer subclasses that override apply_gradients() expect raw
<del> # Tensors, even though the base Optimizer can handle PerReplica gradients.
<add> # Tensors. Optimizer subclasses that override apply_gradients() expect
<add> # raw Tensors, even though the base Optimizer can handle PerReplica
<add> # gradients.
<ide>
<ide> outer_self = self
<ide>
<ide> def __init__(self, *args, **kwargs):
<ide> self.assertEqual(self.evaluate(slot_var).item(), -1)
<ide> self.assertEqual(self.evaluate(opt.iterations), 1)
<ide>
<del> # Set optimizer variable to check arbitrary optimizer attributes can be
<del> # saved/restored
<add> # Set optimizer variable to check arbitrary optimizer attributes can
<add> # be saved/restored
<ide> self.evaluate(inner_opt.my_var.assign(1.0))
<ide>
<ide> # Save a checkpoint.
<ide> def testSerializationWithBuiltInOptimizer(self, lso_type):
<ide> )
<ide> config = optimizers.serialize(opt)
<ide> if lso_type == "v1":
<del> # LossScaleOptimizerV1 was an older experimental version of LSO that is
<del> # now deleted. The config had the same format as LSO but the class
<del> # name was different. This tests that LSO V1 configs can still be
<del> # deserialized, which are deserialized as a (non-V1) LSO
<add> # LossScaleOptimizerV1 was an older experimental version of LSO
<add> # that is now deleted. The config had the same format as LSO but
<add> # the class name was different. This tests that LSO V1 configs
<add> # can still be deserialized, which are deserialized as a
<add> # (non-V1) LSO
<ide> config["class_name"] = "LossScaleOptimizerV1"
<ide> else:
<ide> opt = sgd_experimental.SGD(2.0, momentum=0.5)
<ide> def testScalingWarning(self, opt_cls):
<ide> lso.get_scaled_loss(tf.constant(1.0))
<ide> lso.apply_gradients([(tf.constant(1.0), var)])
<ide> self.assertIn(
<del> "You forgot to call LossScaleOptimizer.get_unscaled_gradients() "
<del> "before",
<add> "You forgot to call "
<add> "LossScaleOptimizer.get_unscaled_gradients() before",
<ide> mock_warn.call_args_list[0][0][0],
<ide> )
<ide> lso = create_lso(create_sgd(opt_cls))
<ide> with mock.patch.object(tf_logging, "warning") as mock_warn:
<ide> lso.get_unscaled_gradients([tf.constant(1.0)])
<ide> lso.apply_gradients([(tf.constant(1.0), var)])
<ide> self.assertIn(
<del> "You forgot to call LossScaleOptimizer.get_scaled_loss() before",
<add> "You forgot to call LossScaleOptimizer.get_scaled_loss() "
<add> "before",
<ide> mock_warn.call_args_list[0][0][0],
<ide> )
<ide> lso = create_lso(create_sgd(opt_cls))
<ide><path>keras/mixed_precision/mixed_precision_graph_rewrite_test.py
<ide> class MixedPrecisionTest(test_combinations.TestCase):
<ide>
<ide> def setUp(self):
<ide> super().setUp()
<del> # Enable the tests to be run on pre-Volta GPUs by telling the grappler pass
<del> # to ignore performance and always transform the graph.
<add> # Enable the tests to be run on pre-Volta GPUs by telling the grappler
<add> # pass to ignore performance and always transform the graph.
<ide> self._original_ignore_perf_value = os.getenv(self.IGNORE_PERF_VAR)
<ide> os.environ[self.IGNORE_PERF_VAR] = "1"
<ide>
<ide><path>keras/mixed_precision/model_test.py
<ide> def loss_fn(y_true, y_pred):
<ide> del y_true
<ide> return tf.reduce_mean(y_pred)
<ide>
<del> # Learning rate is small enough that if applied to a float16 variable,
<del> # the variable will not change. So this tests the learning rate not
<del> # applied to a float16 value, but instead the float32 variable.
<add> # Learning rate is small enough that if applied to a float16
<add> # variable, the variable will not change. So this tests the
<add> # learning rate not applied to a float16 value, but instead the
<add> # float32 variable.
<ide> opt = gradient_descent.SGD(2**-14)
<del> # Use a fixed loss scale, as this test will fail if gradients are
<del> # skipped for a step due to dynamic loss scaling.
<add> # Use a fixed loss scale, as this test will fail if gradients
<add> # are skipped for a step due to dynamic loss scaling.
<ide> opt = loss_scale_optimizer.LossScaleOptimizer(
<ide> opt, dynamic=False, initial_scale=8
<ide> )
<ide> def _test_saving(self, model, dataset, save_format, use_regularizer):
<ide> },
<ide> )
<ide> def test_fixed_loss_scaling(self, strategy_fn):
<del> # Note: We do not test mixed precision in this method, only loss scaling.
<add> # Note: We do not test mixed precision in this method, only loss
<add> # scaling.
<ide> loss_scale = 8.0
<ide> batch_size = 4
<ide> with strategy_fn().scope():
<ide> x = layers.Input(shape=(1,), batch_size=batch_size)
<ide> layer = mp_test_util.MultiplyLayer()
<ide> y = layer(x)
<ide>
<del> # The gradient of 'y' at this point is 1. With loss scaling, the gradient
<del> # is 'loss_scale'. We divide by the batch size since the loss is averaged
<del> # across batch elements.
<add> # The gradient of 'y' at this point is 1. With loss scaling, the
<add> # gradient is 'loss_scale'. We divide by the batch size since the
<add> # loss is averaged across batch elements.
<ide> expected_gradient = loss_scale / batch_size
<ide> identity_with_grad_check_fn = (
<ide> mp_test_util.create_identity_with_grad_check_fn(
<ide> def loss_fn(y_true, y_pred):
<ide> y = np.ones((batch_size, 1))
<ide> dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(batch_size)
<ide> model.fit(dataset)
<del> # Variable starts at 1, and should have gradient of 1 subtracted from it.
<add> # Variable starts at 1, and should have gradient of 1 subtracted from
<add> # it.
<ide> expected = 0
<ide> self.assertEqual(backend.eval(layer.v), expected)
<ide>
<ide> def loss_fn(y_true, y_pred):
<ide> },
<ide> )
<ide> def test_advanced_model(self, strategy_fn, use_loss_scaling=False):
<del> # The advanced model tests mixed-precision-related features that would occur
<del> # in a resnet50 model. It tests a model that has:
<del> # * Multiple layers, some which use auto-cast variables and some which do
<del> # not
<add> # The advanced model tests mixed-precision-related features that would
<add> # occur in a resnet50 model. It tests a model that has:
<add> # * Multiple layers, some which use auto-cast variables and some which
<add> # do not
<ide> # * Regularization on some variables and not others.
<ide> # * A fixed loss scale (if use_loss_scaling is True)
<ide>
<ide> def test_advanced_model(self, strategy_fn, use_loss_scaling=False):
<ide> y = layer3(y)
<ide> y = layer4(y)
<ide> if use_loss_scaling:
<del> # The gradient of 'y' at this point is 1. With loss scaling, the
<del> # gradient is 'loss_scale'. We divide by the batch size of 2 since the
<del> # loss is averaged across batch elements.
<add> # The gradient of 'y' at this point is 1. With loss scaling,
<add> # the gradient is 'loss_scale'. We divide by the batch size
<add> # of 2 since the loss is averaged across batch elements.
<ide> expected_gradient = loss_scale / 2
<ide> identity_with_grad_check_fn = (
<ide> mp_test_util.create_identity_with_grad_check_fn(
<ide> def test_dynamic_loss_scaling(self, strategy_fn, get_config=False):
<ide> expected_gradient = backend.variable(
<ide> [initial_loss_scale / batch_size], dtype=tf.float16
<ide> )
<del> # If this variable is set to True, the model below will have NaN gradients
<add> # If this variable is set to True, the model below will have NaN
<add> # gradients
<ide> have_nan_gradients = backend.variable(False, dtype=tf.bool)
<ide> with strategy.scope():
<ide> opt = gradient_descent.SGD(1.0)
<ide> def loss_fn(y_true, y_pred):
<ide> y = np.ones((batch_size, 1))
<ide> dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(batch_size)
<ide> model.fit(dataset)
<del> # The variables starts with 1 and has a gradient of 1, so will go down by 1
<del> # each step.
<add>        # The variable starts with 1 and has a gradient of 1, so will go down
<add> # by 1 each step.
<ide> self.assertEqual(backend.eval(layer.v), 0)
<ide>
<ide> model.fit(dataset)
<ide> def loss_fn(y_true, y_pred):
<ide>
<ide> # Test with finite gradients again
<ide> backend.set_value(have_nan_gradients, False)
<del> # The loss scale will be halved due to the NaNs, so the gradient will also
<del> # be halved
<add> # The loss scale will be halved due to the NaNs, so the gradient will
<add> # also be halved
<ide> backend.set_value(
<ide> expected_gradient, backend.get_value(expected_gradient / 2)
<ide> )
<ide> def test_compile_wraps_with_loss_scale_optimizer(self):
<ide> model.optimizer, loss_scale_optimizer.LossScaleOptimizer
<ide> )
<ide>
<del> # Test if an LSO is passed, optimizer is not automatically wrapped with
<del> # another LSO
<add> # Test if an LSO is passed, optimizer is not automatically wrapped
<add> # with another LSO
<ide> model = models.Model(x, y)
<ide> optimizer = loss_scale_optimizer.LossScaleOptimizer(
<ide> gradient_descent.SGD(1.0), dynamic_growth_steps=2
<ide> def test_save_slot_variables_with_autocast_vars(
<ide> p = policy.Policy("mixed_float16")
<ide> with strategy_fn().scope(), policy.policy_scope(p):
<ide> x = layers.Input(shape=(2,), batch_size=2)
<del> # Having a var_name other than 'v' tests that a fixed bug (b/134713714)
<del> # does not reoccur. The bug was that a crash would occur when saving a
<del> # checkpoint where an AutoCastVariable with a slot variable would have a
<del> # different name than the layer attribute's name (layer.v in this case).
<add> # Having a var_name other than 'v' tests that a fixed bug
<add> # (b/134713714) does not reoccur. The bug was that a crash would
<add> # occur when saving a checkpoint where an AutoCastVariable with a
<add> # slot variable would have a different name than the layer
<add> # attribute's name (layer.v in this case).
<ide> layer = mp_test_util.MultiplyLayer(
<ide> assert_type=tf.float16, var_name=var_name
<ide> )
<ide> def test_restore_old_saved_model(self):
<ide> },
<ide> )
<ide> def test_save_model_with_dynamic_loss_scaling(self, strategy_fn, h5=False):
<del> # TODO(reedwm): Support and test saving model with a mixed_[b]float16 policy
<del> # as well.
<add> # TODO(reedwm): Support and test saving model with a mixed_[b]float16
<add> # policy as well.
<ide> strategy = strategy_fn()
<ide> if (
<ide> isinstance(strategy, tf.distribute.MirroredStrategy)
<ide> def test_save_model_with_dynamic_loss_scaling(self, strategy_fn, h5=False):
<ide> (weight,) = model.trainable_weights
<ide> loaded_weight = backend.get_value(weight)
<ide> self.assertEqual(loaded_weight, orig_weight)
<del> # Currently the loss scale isn't always saved when the model is saved with
<del> # Model.save(). So we assert the loss scale either has the value when it was
<del> # saved, or the value it was initialized with.
<add> # Currently the loss scale isn't always saved when the model is saved
<add> # with Model.save(). So we assert the loss scale either has the value
<add> # when it was saved, or the value it was initialized with.
<ide> # TODO(reedwm): Always save/restore the loss scale with Model.save().
<ide> self.assertIn(backend.get_value(model.optimizer.loss_scale), (1, 2))
<ide> self.assertIn(
<ide><path>keras/mixed_precision/policy.py
<ide> class Policy:
<ide> `tf.keras.mixed_precision.set_global_policy`.
<ide>
<ide> Args:
<del> name: The policy name, which determines the compute and variable dtypes. Can
<del> be any dtype name, such as `'float32'` or `'float64'`, which causes both
<del> the compute and variable dtypes will be that dtype. Can also be the string
<del> `'mixed_float16'` or `'mixed_bfloat16'`, which causes the compute dtype to
<del> be float16 or bfloat16 and the variable dtype to be float32.
<add> name: The policy name, which determines the compute and variable dtypes.
<add> Can be any dtype name, such as `'float32'` or `'float64'`, which causes
<add>        both the compute and variable dtypes to be that dtype. Can also be the
<add> string `'mixed_float16'` or `'mixed_bfloat16'`, which causes the compute
<add> dtype to be float16 or bfloat16 and the variable dtype to be float32.
<ide>
<ide> Typically you only need to interact with dtype policies when using mixed
<ide> precision, which is the use of float16 or bfloat16 for computations and
<ide> float32 for variables. This is why the term `mixed_precision` appears in the
<ide> API name. Mixed precision can be enabled by passing `'mixed_float16'` or
<ide> `'mixed_bfloat16'` to `tf.keras.mixed_precision.set_global_policy`. See [the
<del> mixed precision guide](https://www.tensorflow.org/guide/keras/mixed_precision)
<del> for more information on how to use mixed precision.
<add> mixed precision
<add> guide](https://www.tensorflow.org/guide/keras/mixed_precision) for more
<add> information on how to use mixed precision.
<ide>
<ide> >>> tf.keras.mixed_precision.set_global_policy('mixed_float16')
<ide> >>> layer1 = tf.keras.layers.Dense(10)
<ide> >>> layer1.dtype_policy # `layer1` will automatically use mixed precision
<ide> <Policy "mixed_float16">
<del> >>> # Can optionally override layer to use float32 instead of mixed precision.
<add> >>> # Can optionally override layer to use float32
<add> >>> # instead of mixed precision.
<ide> >>> layer2 = tf.keras.layers.Dense(10, dtype='float32')
<ide> >>> layer2.dtype_policy
<ide> <Policy "float32">
<ide> >>> # Set policy back to initial float32 for future examples.
<ide> >>> tf.keras.mixed_precision.set_global_policy('float32')
<ide>
<del> In the example above, passing `dtype='float32'` to the layer is equivalent to
<del> passing `dtype=tf.keras.mixed_precision.Policy('float32')`. In general,
<add> In the example above, passing `dtype='float32'` to the layer is equivalent
<add> to passing `dtype=tf.keras.mixed_precision.Policy('float32')`. In general,
<ide> passing a dtype policy name to a layer is equivalent to passing the
<ide> corresponding policy, so it is never necessary to explicitly construct a
<ide> `Policy` object.
<ide>
<ide> Note: `Model.compile` will automatically wrap an optimizer with a
<del> `tf.keras.mixed_precision.LossScaleOptimizer` if you use the `'mixed_float16'`
<del> policy. If you use a custom training loop instead of calling `Model.compile`,
<del> you should explicitly use a `tf.keras.mixed_precision.LossScaleOptimizer` to
<del> avoid numeric underflow with float16.
<add> `tf.keras.mixed_precision.LossScaleOptimizer` if you use the
<add> `'mixed_float16'` policy. If you use a custom training loop instead of
<add> calling `Model.compile`, you should explicitly use a
<add> `tf.keras.mixed_precision.LossScaleOptimizer` to avoid numeric underflow
<add> with float16.
<ide>
<ide> ### How a layer uses its policy's compute dtype
<ide>
<ide> class Policy:
<ide> Note that the base `tf.keras.layers.Layer` class inserts the casts. If
<ide> subclassing your own layer, you do not have to insert any casts.
<ide>
<del> Currently, only tensors in the first argument to the layer's `call` method are
<del> casted (although this will likely be changed in a future minor release). For
<del> example:
<add> Currently, only tensors in the first argument to the layer's `call` method
<add> are casted (although this will likely be changed in a future minor release).
<add> For example:
<ide>
<ide> >>> class MyLayer(tf.keras.layers.Layer):
<ide> ... # Bug! `b` will not be casted.
<ide> class Policy:
<ide> tf.float32
<ide>
<ide> If writing your own layer with multiple inputs, you should either explicitly
<del> cast other tensors to `self.compute_dtype` in `call` or accept all tensors in
<del> the first argument as a list.
<add> cast other tensors to `self.compute_dtype` in `call` or accept all tensors
<add> in the first argument as a list.
<ide>
<ide> The casting only occurs in TensorFlow 2. If
<ide> `tf.compat.v1.disable_v2_behavior()` has been called, you can enable the
<del> casting behavior with `tf.compat.v1.keras.layers.enable_v2_dtype_behavior()`.
<add> casting behavior with
<add> `tf.compat.v1.keras.layers.enable_v2_dtype_behavior()`.
<ide>
<ide> ### How a layer uses its policy's variable dtype
<ide>
<ide> The default dtype of variables created by `tf.keras.layers.Layer.add_weight`
<ide> is the layer's policy's variable dtype.
<ide>
<ide> If a layer's compute and variable dtypes differ, `add_weight` will wrap
<del> floating-point variables with a special wrapper called an `AutoCastVariable`.
<del> `AutoCastVariable` is identical to the original variable except it casts
<del> itself to the layer's compute dtype when used within `Layer.call`. This means
<del> if you are writing a layer, you do not have to explicitly cast the variables
<del> to the layer's compute dtype. For example:
<add> floating-point variables with a special wrapper called an
<add> `AutoCastVariable`. `AutoCastVariable` is identical to the original
<add> variable except it casts itself to the layer's compute dtype when used
<add> within `Layer.call`. This means if you are writing a layer, you do not have
<add> to explicitly cast the variables to the layer's compute dtype. For example:
<ide>
<ide> >>> class SimpleDense(tf.keras.layers.Layer):
<ide> ...
<ide> class Policy:
<ide>
<ide> For the most part, layers will automatically support mixed precision and
<ide> float64 without any additional work, due to the fact the base layer
<del> automatically casts inputs, creates variables of the correct type, and in the
<del> case of mixed precision, wraps variables with `AutoCastVariables`.
<add> automatically casts inputs, creates variables of the correct type, and in
<add> the case of mixed precision, wraps variables with `AutoCastVariables`.
<ide>
<ide> The primary case where you need extra work to support mixed precision or
<ide> float64 is when you create a new tensor, such as with `tf.ones` or
<ide> `tf.random.normal`, In such cases, you must create the tensor of the correct
<del> dtype. For example, if you call `tf.random.normal`, you must pass the compute
<del> dtype, which is the dtype the inputs have been casted to:
<add> dtype. For example, if you call `tf.random.normal`, you must pass the
<add> compute dtype, which is the dtype the inputs have been casted to:
<ide>
<ide> >>> class AddRandom(tf.keras.layers.Layer):
<ide> ...
<ide> class Policy:
<ide>
<ide> If you did not pass `dtype=inputs.dtype` to `tf.random.normal`, a
<ide> `TypeError` would have occurred. This is because the `tf.random.normal`'s
<del> dtype defaults to `"float32"`, but the input dtype is float16. You cannot add
<del> a float32 tensor with a float16 tensor.
<add> dtype defaults to `"float32"`, but the input dtype is float16. You cannot
<add> add a float32 tensor with a float16 tensor.
<ide> """
<ide>
<ide> def __init__(self, name):
<ide> def _parse_name(self, name):
<ide> return "bfloat16", "float32"
<ide> elif name == "_infer":
<ide> # The "_infer" policy exists only for compatibility with TF 1, where
<del> # "_infer" is the default. The behavior matches the behavior of TF 1's
<del> # behavior before policies were introduced. With "_infer", the computation
<del> # and variable dtype are inferred from the first input the first time the
<del> # layer is called. Once the layer is called for the first time, the
<del> # layer's policy will change to the dtype of the first input, and it will
<del> # no longer have the "_infer" policy.
<add> # "_infer" is the default. The behavior matches the behavior of TF
<add> # 1's behavior before policies were introduced. With "_infer", the
<add> # computation and variable dtype are inferred from the first input
<add> # the first time the layer is called. Once the layer is called for
<add> # the first time, the layer's policy will change to the dtype of the
<add> # first input, and it will no longer have the "_infer" policy.
<ide> #
<del> # The infer policy should be considered an implementation detail and may
<del> # be removed in the future.
<add> # The infer policy should be considered an implementation detail and
<add> # may be removed in the future.
<ide> return None, None
<ide>
<ide> try:
<ide> def variable_dtype(self):
<ide>
<ide> This is the dtype layers will create their variables in, unless a layer
<ide> explicitly chooses a different dtype. If this is different than
<del> `Policy.compute_dtype`, Layers will cast variables to the compute dtype to
<del> avoid type errors.
<add> `Policy.compute_dtype`, Layers will cast variables to the compute dtype
<add> to avoid type errors.
<ide>
<del> Variable regularizers are run in the variable dtype, not the compute dtype.
<add> Variable regularizers are run in the variable dtype, not the compute
<add> dtype.
<ide>
<ide> Returns:
<ide> The variable dtype of this policy, as a string.
<ide> def compute_dtype(self):
<ide> This is the dtype layers will do their computations in. Typically layers
<ide> output tensors with the compute dtype as well.
<ide>
<del> Note that even if the compute dtype is float16 or bfloat16, hardware devices
<del> may not do individual adds, multiplies, and other fundamental operations in
<del> float16 or bfloat16, but instead may do some of them in float32 for numeric
<del> stability. The compute dtype is the dtype of the inputs and outputs of the
<del> TensorFlow ops that the layer executes. Internally, many TensorFlow ops will
<del> do certain internal calculations in float32 or some other device-internal
<del> intermediate format with higher precision than float16/bfloat16, to increase
<del> numeric stability.
<add> Note that even if the compute dtype is float16 or bfloat16, hardware
<add> devices may not do individual adds, multiplies, and other fundamental
<add> operations in float16 or bfloat16, but instead may do some of them in
<add> float32 for numeric stability. The compute dtype is the dtype of the
<add> inputs and outputs of the TensorFlow ops that the layer executes.
<add> Internally, many TensorFlow ops will do certain internal calculations in
<add> float32 or some other device-internal intermediate format with higher
<add> precision than float16/bfloat16, to increase numeric stability.
<ide>
<ide> For example, a `tf.keras.layers.Dense` layer, when run on a GPU with a
<del> float16 compute dtype, will pass float16 inputs to `tf.linalg.matmul`. But,
<del> `tf.linalg.matmul` will do use float32 intermediate math. The performance
<del> benefit of float16 is still apparent, due to increased memory bandwidth and
<del> the fact modern GPUs have specialized hardware for computing matmuls on
<del> float16 inputs while still keeping intermediate computations in float32.
<add> float16 compute dtype, will pass float16 inputs to `tf.linalg.matmul`.
<add> But, `tf.linalg.matmul` will do use float32 intermediate math. The
<add> performance benefit of float16 is still apparent, due to increased
<add> memory bandwidth and the fact modern GPUs have specialized hardware for
<add> computing matmuls on float16 inputs while still keeping intermediate
<add> computations in float32.
<ide>
<ide> Returns:
<ide> The compute dtype of this policy, as a string.
<ide> def global_policy():
<ide> """Returns the global dtype policy.
<ide>
<ide> The global policy is the default `tf.keras.mixed_precision.Policy` used for
<del> layers, if no policy is passed to the layer constructor. If no policy has been
<del> set with `keras.mixed_precision.set_global_policy`, this will return a policy
<del> constructed from `tf.keras.backend.floatx()` (floatx defaults to float32).
<add> layers, if no policy is passed to the layer constructor. If no policy has
<add> been set with `keras.mixed_precision.set_global_policy`, this will return a
<add> policy constructed from `tf.keras.backend.floatx()` (floatx defaults to
<add> float32).
<ide>
<ide> >>> tf.keras.mixed_precision.global_policy()
<ide> <Policy "float32">
<ide> def global_policy():
<ide> def _check_if_mixed_precision_graph_rewrite_is_enabled(policy):
<ide> if tf.__internal__.train.is_mixed_precision_graph_rewrite_enabled():
<ide> raise ValueError(
<del> 'The global dtype policy cannot be set to "{policy.name}", because the '
<del> "mixed precision graph rewrite has already been enabled.\n"
<add> 'The global dtype policy cannot be set to "{policy.name}", because '
<add> "the mixed precision graph rewrite has already been enabled.\n"
<ide> "At most, one of the following can be called:\n\n"
<ide> " 1. tf.compat.v1.train.enable_mixed_precision_graph_rewrite() "
<ide> "(You called this first)\n"
<ide> " 2. tf.keras.mixed_precision.set_global_policy() with a mixed "
<ide> "precision policy (You called this second)\n\n"
<del> "You called both functions, which is an error, because both functions "
<del> "enable you to use mixed precision. If in doubt which function to use, "
<del> "use the second, as it supports Eager execution and is more "
<del> "customizable.".format(policy=policy)
<add> "You called both functions, which is an error, because both "
<add> "functions enable you to use mixed precision. If in doubt which "
<add> "function to use, use the second, as it supports Eager execution "
<add> "and is more customizable.".format(policy=policy)
<ide> )
<ide>
<ide>
<ide> def set_global_policy(policy):
<ide> <Policy "mixed_float16">
<ide> >>> tf.keras.layers.Dense(10).dtype_policy
<ide> <Policy "mixed_float16">
<del> >>> # Global policy is not used if a policy is directly passed to constructor
<add> >>> # Global policy is not used if a policy
<add> >>> # is directly passed to constructor
<ide> >>> tf.keras.layers.Dense(10, dtype='float64').dtype_policy
<ide> <Policy "float64">
<ide> >>> tf.keras.mixed_precision.set_global_policy('float32')
<ide> def _is_convertible_to_dtype(dtype):
<ide> def _policy_equivalent_to_dtype(policy):
<ide> """Returns True if the Policy is equivalent to a single dtype.
<ide>
<del> A policy is equivalent to a single dtype if the policy's compute and variable
<del> dtypes are the same and the policy's type is Policy and not a subclass of
<del> Policy.
<add> A policy is equivalent to a single dtype if the policy's compute and
<add> variable dtypes are the same and the policy's type is Policy and not a
<add> subclass of Policy.
<ide>
<ide> The "_infer" policy is considered equivalent to a single dtype.
<ide>
<ide> def _policy_equivalent_to_dtype(policy):
<ide> def serialize(policy):
<ide> if _policy_equivalent_to_dtype(policy):
<ide> # We return either None or the policy name for compatibility with older
<del> # versions of Keras. If the policy name is returned, it is a dtype string
<del> # such as 'float32'.
<add> # versions of Keras. If the policy name is returned, it is a dtype
<add> # string such as 'float32'.
<ide> return None if policy.name == "_infer" else policy.name
<ide> return generic_utils.serialize_keras_object(policy)
<ide>
<ide><path>keras/mixed_precision/policy_test.py
<ide> def test_global_policy(self):
<ide> try:
<ide> mp_policy.set_global_policy("mixed_float16")
<ide> self.assertEqual(mp_policy.global_policy().name, "mixed_float16")
<del> with tf.Graph().as_default(): # Policies are not associated with a graph
<add> # Policies are not associated with a graph
<add> with tf.Graph().as_default():
<ide> self.assertEqual(
<ide> mp_policy.global_policy().name, "mixed_float16"
<ide> )
<ide> def test_global_policy_dtype_error(self):
<ide> with self.assertRaisesRegex(
<ide> ValueError,
<ide> "set_global_policy can only be used to set the global policy to "
<del> 'floating-point policies, such as "float32" and "mixed_float16", but '
<del> "got policy: int32",
<add> 'floating-point policies, such as "float32" and "mixed_float16", '
<add> "but got policy: int32",
<ide> ):
<ide> mp_policy.set_global_policy("int32")
<ide> with self.assertRaisesRegex(
<ide> ValueError,
<ide> "set_global_policy can only be used to set the global policy to "
<del> 'floating-point policies, such as "float32" and "mixed_float16", but '
<del> "got policy: complex64",
<add> 'floating-point policies, such as "float32" and "mixed_float16", '
<add> "but got policy: complex64",
<ide> ):
<ide> mp_policy.set_global_policy(mp_policy.Policy("complex64"))
<ide>
<ide> def test_device_compatibility_warning(self):
<ide> else:
<ide> self.assertRegex(
<ide> mock_warn.call_args[0][0],
<del> r"Mixed precision compatibility check \(mixed_float16\): WARNING.*",
<add> r"Mixed precision compatibility check \(mixed_float16\): "
<add> r"WARNING.*",
<ide> )
<ide>
<ide> if tf.config.list_physical_devices("GPU"):
<ide> def test_config(self):
<ide> ):
<ide> config = policy.get_config()
<ide> new_policy = mp_policy.Policy.from_config(config)
<del> # Comparing strings is the easiest way to ensure the policies are the
<del> # same, as policy does not override the == operator.
<add> # Comparing strings is the easiest way to ensure the policies are
<add> # the same, as policy does not override the == operator.
<ide> self.assertEqual(str(policy), str(new_policy))
<ide>
<ide> @test_utils.enable_v2_dtype_behavior
<ide><path>keras/mixed_precision/test_util.py
<ide> def _identity_with_grad_check(x):
<ide> x = tf.identity(x)
<ide>
<ide> def grad(dx):
<del> """Gradient function that asserts the gradient has a certain value."""
<add> """Gradient function that asserts the gradient has a certain
<add> value."""
<ide> if expected_dtype:
<ide> assert (
<ide> dx.dtype == expected_dtype
<ide> def grad(dx):
<ide> expected_tensor = tf.convert_to_tensor(
<ide> expected_gradient, dtype=dx.dtype, name="expected_gradient"
<ide> )
<del> # Control dependency is to ensure input is available. It's possible the
<del> # dataset will throw a StopIteration to indicate there is no more data, in
<del> # which case we don't want to run the assertion.
<add> # Control dependency is to ensure input is available. It's possible
<add> # the dataset will throw a StopIteration to indicate there is no
<add> # more data, in which case we don't want to run the assertion.
<ide> with tf.control_dependencies([x]):
<ide> assert_op = tf.compat.v1.assert_equal(dx, expected_tensor)
<ide> with tf.control_dependencies([assert_op]):
<ide> def create_identity_with_nan_gradients_fn(have_nan_gradients):
<ide> """Returns a function that optionally has NaN gradients.
<ide>
<ide> This serves as a hook to introduce NaN gradients to a model. This returns an
<del> identity function. The identity's gradient function will check if the boolean
<del> tensor `have_nan_gradients` is True. If so, the gradient will be NaN.
<del> Otherwise, the gradient will also be the identity.
<add> identity function. The identity's gradient function will check if the
<add> boolean tensor `have_nan_gradients` is True. If so, the gradient will be
<add> NaN. Otherwise, the gradient will also be the identity.
<ide>
<ide> Args:
<del> have_nan_gradients: A scalar boolean tensor. If True, gradients will be NaN.
<del> Otherwise, the gradient function is the identity function.
<add> have_nan_gradients: A scalar boolean tensor. If True, gradients will be
<add> NaN. Otherwise, the gradient function is the identity function.
<ide>
<ide> Returns:
<ide> An identity function whose gradient function will return NaNs, if
<ide> def __init__(self, assert_type=None, **kwargs):
<ide> super().__init__(**kwargs)
<ide>
<ide> def assert_input_types(self, inputs):
<del> """Asserts `inputs` are of the correct type. Should be called in call()."""
<add> """Asserts `inputs` are of the correct type. Should be called in
<add> call()."""
<ide> if self._assert_type:
<ide> inputs_flattened = tf.nest.flatten(inputs)
<ide> for inp in inputs_flattened:
<ide> assert inp.dtype.base_dtype == self._assert_type, (
<del> "Input tensor has type %s which does not match assert type %s"
<add> "Input tensor has type %s which does "
<add> "not match assert type %s"
<ide> % (inp.dtype.name, self._assert_type)
<ide> )
<ide>
<ide> def __init__(
<ide> activity_regularizer: The activity regularizer.
<ide> use_operator: If True, add using the * operator. If False, add using
<ide> tf.multiply.
<del> var_name: The name of the variable. It can be useful to pass a name other
<del> than 'v', to test having the attribute name (self.v) being different
<del> from the variable name.
<add> var_name: The name of the variable. It can be useful to pass a name
<add> other than 'v', to test having the attribute name (self.v) being
<add> different from the variable name.
<ide> **kwargs: Passed to AssertTypeLayer constructor.
<ide> """
<ide> self._regularizer = regularizer | 13 |
Javascript | Javascript | fix disabled prop for text | 88a63980b58cb4f820229318dba39f0680b76ba3 | <ide><path>Libraries/Text/Text.js
<ide> const viewConfig = {
<ide> numberOfLines: true,
<ide> ellipsizeMode: true,
<ide> allowFontScaling: true,
<add> disabled: true,
<ide> selectable: true,
<ide> selectionColor: true,
<ide> adjustsFontSizeToFit: true, | 1 |
Go | Go | use local variable for platform | 9839ddd80047c62ce5e28f9436a1709eb2a5555d | <ide><path>api/server/router/image/image_routes.go
<ide> func (s *imageRouter) postImagesCreate(ctx context.Context, w http.ResponseWrite
<ide>
<ide> version := httputils.VersionFromContext(ctx)
<ide> if versions.GreaterThanOrEqualTo(version, "1.32") {
<del> apiPlatform := r.FormValue("platform")
<del> if apiPlatform != "" {
<del> sp, err := platforms.Parse(apiPlatform)
<add> if p := r.FormValue("platform"); p != "" {
<add> sp, err := platforms.Parse(p)
<ide> if err != nil {
<ide> return err
<ide> } | 1 |
Javascript | Javascript | fix some inconsequential typos | 305e201b71710f1d7742662d36450b1926f4aeb6 | <ide><path>gdocs.js
<ide> if (flag === '--login') {
<ide> }
<ide>
<ide> function help() {
<del> console.log('Synopsys');
<add> console.log('Synopsis');
<ide> console.log('gdocs.js --login <username>');
<ide> console.log('gdocs.js --fetch [<docs collection>]');
<ide> process.exit(-1);
<ide><path>test/auto/injectorSpec.js
<ide> describe('injector', function() {
<ide>
<ide>
<ide> it('should create $inject', function() {
<del> var extraParans = angular.noop;
<add> var extraParams = angular.noop;
<ide> /* eslint-disable space-before-function-paren */
<ide> // keep the multi-line to make sure we can handle it
<ide> function $f_n0 /*
<ide> describe('injector', function() {
<ide> function(a, b) {}
<ide> */
<ide> _c,
<del> /* {some type} */ d) { extraParans(); }
<add> /* {some type} */ d) { extraParams(); }
<ide> /* eslint-enable */
<ide> expect(annotate($f_n0)).toEqual(['$a', 'b_', '_c', 'd']);
<ide> expect($f_n0.$inject).toEqual(['$a', 'b_', '_c', 'd']);
<ide><path>test/ng/compileSpec.js
<ide> describe('$compile', function() {
<ide> testReplaceElementCleanup({});
<ide> });
<ide> it('should clean data of elements removed for directive templateUrl', function() {
<del> testReplaceElementCleanup({asyncTmeplate: true});
<add> testReplaceElementCleanup({asyncTemplate: true});
<ide> });
<ide> it('should clean data of elements transcluded into directive template', function() {
<ide> testReplaceElementCleanup({transclude: true});
<ide> });
<ide> it('should clean data of elements transcluded into directive templateUrl', function() {
<del> testReplaceElementCleanup({transclude: true, asyncTmeplate: true});
<add> testReplaceElementCleanup({transclude: true, asyncTemplate: true});
<ide> });
<ide> it('should clean data of elements replaced with directive template', function() {
<ide> testReplaceElementCleanup({replace: true});
<ide><path>test/ng/directive/selectSpec.js
<ide> describe('select', function() {
<ide> scope.robot = '';
<ide> compile('<select ng-model="robot">' +
<ide> '<option ng-repeat="opt in dynamicOptions" value="{{opt.val}}">{{opt.display}}</option>' +
<del> '</selec>');
<add> '</select>');
<ide> expect(element).toEqualSelect(['? string: ?']);
<ide>
<ide>
<ide> describe('select', function() {
<ide> compile('<select ng-model="robot">' +
<ide> '<option value="">--static-select--</option>' +
<ide> '<option ng-repeat="opt in dynamicOptions" value="{{opt.val}}">{{opt.display}}</option>' +
<del> '</selec>');
<add> '</select>');
<ide> scope.$digest();
<ide> expect(element).toEqualSelect([unknownValue('x')], '');
<ide> | 4 |
PHP | PHP | throw exception on invalid smtp client | 0997a53d5ad46c7ba946f85103ff1358feaf9505 | <ide><path>src/Mailer/Transport/SmtpTransport.php
<ide> protected function _connect(): void
<ide>
<ide> $host = 'localhost';
<ide> if (isset($config['client'])) {
<add> if (empty($config['client'])) {
<add> throw new SocketException('Cannot use an empty client name.');
<add> }
<ide> $host = $config['client'];
<ide> } else {
<ide> /** @var string $httpHost */
<ide><path>tests/TestCase/Mailer/Transport/SmtpTransportTest.php
<ide> public function testQuit()
<ide> }
<ide>
<ide> /**
<del> * testEmptyConfigArray method
<add> * Tests using empty client name
<ide> *
<ide> * @return void
<ide> */
<del> public function testEmptyConfigArray()
<add> public function testEmptyClientName()
<ide> {
<del> $this->SmtpTransport->setConfig([
<del> 'client' => 'myhost.com',
<del> 'port' => 666,
<del> ]);
<del> $expected = $this->SmtpTransport->getConfig();
<add> $this->socket->expects($this->any())->method('connect')->will($this->returnValue(true));
<add> $this->socket->expects($this->any())
<add> ->method('read')
<add> ->will($this->onConsecutiveCalls("220 Welcome message\r\n", "250 Accepted\r\n"));
<ide>
<del> $this->assertSame(666, $expected['port']);
<add> $this->SmtpTransport->setConfig(['client' => '']);
<ide>
<del> $this->SmtpTransport->setConfig([]);
<del> $result = $this->SmtpTransport->getConfig();
<del> $this->assertEquals($expected, $result);
<add> $this->expectException(SocketException::class);
<add> $this->expectExceptionMessage('Cannot use an empty client name');
<add> $this->SmtpTransport->connect();
<ide> }
<ide>
<ide> /** | 2 |
Javascript | Javascript | use fetch in fileloader | 8b80974628275e16583467410d0f488161c5a81b | <ide><path>src/loaders/FileLoader.js
<ide> class FileLoader extends Loader {
<ide>
<ide> url = this.manager.resolveURL( url );
<ide>
<del> const scope = this;
<del>
<ide> const cached = Cache.get( url );
<ide>
<ide> if ( cached !== undefined ) {
<ide>
<del> scope.manager.itemStart( url );
<add> this.manager.itemStart( url );
<ide>
<del> setTimeout( function () {
<add> setTimeout( () => {
<ide>
<ide> if ( onLoad ) onLoad( cached );
<ide>
<del> scope.manager.itemEnd( url );
<add> this.manager.itemEnd( url );
<ide>
<ide> }, 0 );
<ide>
<ide> class FileLoader extends Loader {
<ide>
<ide> }
<ide>
<del> // Check for data: URI
<del> const dataUriRegex = /^data:(.*?)(;base64)?,(.*)$/;
<del> const dataUriRegexResult = url.match( dataUriRegex );
<del> let request;
<del>
<del> // Safari can not handle Data URIs through XMLHttpRequest so process manually
<del> if ( dataUriRegexResult ) {
<del>
<del> const mimeType = dataUriRegexResult[ 1 ];
<del> const isBase64 = !! dataUriRegexResult[ 2 ];
<del>
<del> let data = dataUriRegexResult[ 3 ];
<del> data = decodeURIComponent( data );
<del>
<del> if ( isBase64 ) data = atob( data );
<del>
<del> try {
<del>
<del> let response;
<del> const responseType = ( this.responseType || '' ).toLowerCase();
<del>
<del> switch ( responseType ) {
<del>
<del> case 'arraybuffer':
<del> case 'blob':
<del>
<del> const view = new Uint8Array( data.length );
<del>
<del> for ( let i = 0; i < data.length; i ++ ) {
<del>
<del> view[ i ] = data.charCodeAt( i );
<del>
<del> }
<del>
<del> if ( responseType === 'blob' ) {
<del>
<del> response = new Blob( [ view.buffer ], { type: mimeType } );
<add> // Initialise array for duplicate requests
<add> loading[ url ] = [];
<ide>
<del> } else {
<add> loading[ url ].push( {
<add> onLoad: onLoad,
<add> onProgress: onProgress,
<add> onError: onError,
<add> } );
<ide>
<del> response = view.buffer;
<add> // create request
<add> const req = new Request( url, {
<add> headers: new Headers( this.requestHeader ),
<add> credentials: this.withCredentials ? 'include' : 'same-origin',
<add> // An abort controller could be added within a future PR
<add> } );
<ide>
<del> }
<del>
<del> break;
<del>
<del> case 'document':
<del>
<del> const parser = new DOMParser();
<del> response = parser.parseFromString( data, mimeType );
<del>
<del> break;
<del>
<del> case 'json':
<del>
<del> response = JSON.parse( data );
<add> // start the fetch
<add> fetch( req )
<add> .then( response => {
<ide>
<del> break;
<add> if ( response.status === 200 || response.status === 0 ) {
<ide>
<del> default: // 'text' or other
<add> // Some browsers return HTTP Status 0 when using non-http protocol
<add> // e.g. 'file://' or 'data://'. Handle as success.
<ide>
<del> response = data;
<add> if ( response.status === 0 ) {
<ide>
<del> break;
<add> console.warn( 'THREE.FileLoader: HTTP Status 0 received.' );
<ide>
<del> }
<add> }
<ide>
<del> // Wait for next browser tick like standard XMLHttpRequest event dispatching does
<del> setTimeout( function () {
<add> const callbacks = loading[ url ];
<add> const reader = response.body.getReader();
<add> const contentLength = response.headers.get( 'Content-Length' );
<add> const total = contentLength ? parseInt( contentLength ) : 0;
<add> const lengthComputable = total !== 0;
<add> let loaded = 0;
<ide>
<del> if ( onLoad ) onLoad( response );
<add> // periodically read data into the new stream tracking while download progress
<add> return new ReadableStream( {
<add> start( controller ) {
<ide>
<del> scope.manager.itemEnd( url );
<add> readData();
<ide>
<del> }, 0 );
<add> function readData() {
<ide>
<del> } catch ( error ) {
<add> reader.read().then( ( { done, value } ) => {
<ide>
<del> // Wait for next browser tick like standard XMLHttpRequest event dispatching does
<del> setTimeout( function () {
<add> if ( done ) {
<ide>
<del> if ( onError ) onError( error );
<add> controller.close();
<ide>
<del> scope.manager.itemError( url );
<del> scope.manager.itemEnd( url );
<add> } else {
<ide>
<del> }, 0 );
<add> loaded += value.byteLength;
<ide>
<del> }
<add> const event = new ProgressEvent( 'progress', { lengthComputable, loaded, total } );
<add> for ( let i = 0, il = callbacks.length; i < il; i ++ ) {
<ide>
<del> } else {
<add> const callback = callbacks[ i ];
<add> if ( callback.onProgress ) callback.onProgress( event );
<ide>
<del> // Initialise array for duplicate requests
<add> }
<ide>
<del> loading[ url ] = [];
<add> controller.enqueue( value );
<add> readData();
<ide>
<del> loading[ url ].push( {
<add> }
<ide>
<del> onLoad: onLoad,
<del> onProgress: onProgress,
<del> onError: onError
<add> } );
<ide>
<del> } );
<add> }
<ide>
<del> request = new XMLHttpRequest();
<del>
<del> request.open( 'GET', url, true );
<del>
<del> request.addEventListener( 'load', function ( event ) {
<del>
<del> const response = this.response;
<del>
<del> const callbacks = loading[ url ];
<add> }
<ide>
<del> delete loading[ url ];
<add> } );
<ide>
<del> if ( this.status === 200 || this.status === 0 ) {
<add> } else {
<ide>
<del> // Some browsers return HTTP Status 0 when using non-http protocol
<del> // e.g. 'file://' or 'data://'. Handle as success.
<add> throw Error( `fetch for "${response.url}" responded with ${response.status}: ${response.statusText}` );
<ide>
<del> if ( this.status === 0 ) console.warn( 'THREE.FileLoader: HTTP Status 0 received.' );
<add> }
<ide>
<del> // Add to cache only on HTTP success, so that we do not cache
<del> // error response bodies as proper responses to requests.
<del> Cache.add( url, response );
<add> } )
<add> .then( stream => {
<ide>
<del> for ( let i = 0, il = callbacks.length; i < il; i ++ ) {
<add> const response = new Response( stream );
<ide>
<del> const callback = callbacks[ i ];
<del> if ( callback.onLoad ) callback.onLoad( response );
<add> switch ( this.responseType ) {
<ide>
<del> }
<add> case 'arraybuffer':
<ide>
<del> scope.manager.itemEnd( url );
<add> return response.arrayBuffer();
<ide>
<del> } else {
<add> case 'blob':
<ide>
<del> for ( let i = 0, il = callbacks.length; i < il; i ++ ) {
<add> return response.blob();
<ide>
<del> const callback = callbacks[ i ];
<del> if ( callback.onError ) callback.onError( event );
<add> case 'document':
<ide>
<del> }
<add> return response.text()
<add> .then( text => {
<ide>
<del> scope.manager.itemError( url );
<del> scope.manager.itemEnd( url );
<add> const parser = new DOMParser();
<add> return parser.parseFromString( text, this.mimeType );
<ide>
<del> }
<add> } );
<ide>
<del> }, false );
<add> case 'json':
<ide>
<del> request.addEventListener( 'progress', function ( event ) {
<add> return response.json();
<ide>
<del> const callbacks = loading[ url ];
<add> default:
<ide>
<del> for ( let i = 0, il = callbacks.length; i < il; i ++ ) {
<del>
<del> const callback = callbacks[ i ];
<del> if ( callback.onProgress ) callback.onProgress( event );
<add> return response.text();
<ide>
<ide> }
<ide>
<del> }, false );
<add> } )
<add> .then( data => {
<ide>
<del> request.addEventListener( 'error', function ( event ) {
<add> // Add to cache only on HTTP success, so that we do not cache
<add> // error response bodies as proper responses to requests.
<add> Cache.add( url, data );
<ide>
<ide> const callbacks = loading[ url ];
<del>
<ide> delete loading[ url ];
<ide>
<ide> for ( let i = 0, il = callbacks.length; i < il; i ++ ) {
<ide>
<ide> const callback = callbacks[ i ];
<del> if ( callback.onError ) callback.onError( event );
<add> if ( callback.onLoad ) callback.onLoad( data );
<ide>
<ide> }
<ide>
<del> scope.manager.itemError( url );
<del> scope.manager.itemEnd( url );
<add> this.manager.itemEnd( url );
<ide>
<del> }, false );
<add> } )
<add> .catch( err => {
<ide>
<del> request.addEventListener( 'abort', function ( event ) {
<add> // Abort errors and other errors are handled the same
<ide>
<ide> const callbacks = loading[ url ];
<del>
<ide> delete loading[ url ];
<ide>
<ide> for ( let i = 0, il = callbacks.length; i < il; i ++ ) {
<ide>
<ide> const callback = callbacks[ i ];
<del> if ( callback.onError ) callback.onError( event );
<add> if ( callback.onError ) callback.onError( err );
<ide>
<ide> }
<ide>
<del> scope.manager.itemError( url );
<del> scope.manager.itemEnd( url );
<del>
<del> }, false );
<del>
<del> if ( this.responseType !== undefined ) request.responseType = this.responseType;
<del> if ( this.withCredentials !== undefined ) request.withCredentials = this.withCredentials;
<add> this.manager.itemError( url );
<add> this.manager.itemEnd( url );
<ide>
<del> if ( request.overrideMimeType ) request.overrideMimeType( this.mimeType !== undefined ? this.mimeType : 'text/plain' );
<del>
<del> for ( const header in this.requestHeader ) {
<del>
<del> request.setRequestHeader( header, this.requestHeader[ header ] );
<del>
<del> }
<del>
<del> request.send( null );
<del>
<del> }
<del>
<del> scope.manager.itemStart( url );
<add> } );
<ide>
<del> return request;
<add> this.manager.itemStart( url );
<ide>
<ide> }
<ide> | 1 |
Ruby | Ruby | add compatibility for `macos.release` | 14c99abc6512d066b36f558c82c42abf7bf138c4 | <ide><path>Library/Homebrew/compat/macos.rb
<ide> def has_apple_developer_tools?
<ide> odeprecated "MacOS.has_apple_developer_tools?", "DevelopmentTools.installed?"
<ide> DevelopmentTools.installed?
<ide> end
<add>
<add> def release
<add> odeprecated "MacOS.release", "MacOS.version"
<add> version
<add> end
<ide> end
<ide> end
<ide> end | 1 |
Ruby | Ruby | fix rubocop offenses | b4e8bfe716c787edededeaa3475a78bfdb15ddd6 | <ide><path>Library/Homebrew/cask/artifact/abstract_artifact.rb
<ide> def staged_path_join_executable(path)
<ide>
<ide> def <=>(other)
<ide> return unless other.class < AbstractArtifact
<del> return 0 if self.class == other.class
<add> return 0 if instance_of?(other.class)
<ide>
<ide> @@sort_order ||= [ # rubocop:disable Style/ClassVars
<ide> PreflightBlock,
<ide><path>Library/Homebrew/config.rb
<ide> def get_env_or_raise(env)
<ide> HOMEBREW_LOGS = Pathname.new(get_env_or_raise("HOMEBREW_LOGS")).expand_path.freeze
<ide>
<ide> # Must use `/tmp` instead of `TMPDIR` because long paths break Unix domain sockets
<del>HOMEBREW_TEMP = begin
<del> tmp = Pathname.new(get_env_or_raise("HOMEBREW_TEMP"))
<add>HOMEBREW_TEMP = Pathname.new(get_env_or_raise("HOMEBREW_TEMP")).yield_self do |tmp|
<ide> tmp.mkpath unless tmp.exist?
<ide> tmp.realpath
<ide> end.freeze
<ide><path>Library/Homebrew/formulary.rb
<ide> def initialize(url)
<ide> end
<ide>
<ide> def load_file(flags:)
<del> if url =~ %r{githubusercontent.com/[\w-]+/[\w-]+/[a-f0-9]{40}(/Formula)?/([\w+-.@]+).rb}
<del> formula_name = Regexp.last_match(2)
<add> if %r{githubusercontent.com/[\w-]+/[\w-]+/[a-f0-9]{40}(?:/Formula)?/(?<formula_name>[\w+-.@]+).rb} =~ url # rubocop:disable Style/CaseLikeIf
<ide> odisabled "Installation of #{formula_name} from a GitHub commit URL",
<ide> "'brew extract #{formula_name}' to stable tap on GitHub"
<ide> elsif url.match?(%r{^(https?|ftp)://})
<ide> def load_file(flags:)
<ide> curl_download url, to: path
<ide> super
<ide> rescue MethodDeprecatedError => e
<del> if url =~ %r{github.com/([\w-]+)/([\w-]+)/}
<del> e.issues_url = "https://github.com/#{Regexp.last_match(1)}/#{Regexp.last_match(2)}/issues/new"
<add> if %r{github.com/(?<user>[\w-]+)/(?<repo>[\w-]+)/} =~ url
<add> e.issues_url = "https://github.com/#{user}/#{repo}/issues/new"
<ide> end
<ide> raise
<ide> end
<ide><path>Library/Homebrew/rubocops/extend/formula.rb
<ide> def parameters_passed?(method_node, *params)
<ide> @offense_source_range = method_node.source_range
<ide> params.all? do |given_param|
<ide> method_params.any? do |method_param|
<del> if given_param.class == Regexp
<add> if given_param.instance_of?(Regexp)
<ide> regex_match_group(method_param, given_param)
<ide> else
<ide> node_equals?(method_param, given_param)
<ide><path>Library/Homebrew/rubocops/uses_from_macos.rb
<ide> class UsesFromMacos < FormulaCop
<ide>
<ide> def audit_formula(_node, _class_node, _parent_class_node, body_node)
<ide> find_method_with_args(body_node, :uses_from_macos, /^"(.+)"/).each do |method|
<del> dep = if parameters(method).first.class == RuboCop::AST::StrNode
<add> dep = if parameters(method).first.instance_of?(RuboCop::AST::StrNode)
<ide> parameters(method).first
<del> elsif parameters(method).first.class == RuboCop::AST::HashNode
<add> elsif parameters(method).first.instance_of?(RuboCop::AST::HashNode)
<ide> parameters(method).first.keys.first
<ide> end
<ide> | 5 |
Go | Go | set canonical name correctly | 7f48cd7dce6fdc077bcde0962e0aa0e73fb63225 | <ide><path>graph/pull.go
<ide> func (s *TagStore) pullRepository(r *registry.Session, out io.Writer, repoInfo *
<ide> }
<ide> }
<ide>
<del> requestedTag := repoInfo.CanonicalName
<add> requestedTag := repoInfo.LocalName
<ide> if len(askedTag) > 0 {
<del> requestedTag = utils.ImageReference(repoInfo.CanonicalName, askedTag)
<add> requestedTag = utils.ImageReference(repoInfo.LocalName, askedTag)
<ide> }
<ide> WriteStatus(requestedTag, out, sf, layersDownloaded)
<ide> return nil
<ide> func (s *TagStore) pullV2Repository(r *registry.Session, out io.Writer, repoInfo
<ide> }
<ide> }
<ide>
<del> requestedTag := repoInfo.CanonicalName
<add> requestedTag := repoInfo.LocalName
<ide> if len(tag) > 0 {
<del> requestedTag = utils.ImageReference(repoInfo.CanonicalName, tag)
<add> requestedTag = utils.ImageReference(repoInfo.LocalName, tag)
<ide> }
<ide> WriteStatus(requestedTag, out, sf, layersDownloaded)
<ide> return nil
<ide><path>registry/config.go
<ide> func (config *ServiceConfig) NewRepositoryInfo(reposName string) (*RepositoryInf
<ide> repoInfo.RemoteName = "library/" + normalizedName
<ide> }
<ide>
<del> // *TODO: Prefix this with 'docker.io/'.
<del> repoInfo.CanonicalName = repoInfo.LocalName
<add> repoInfo.CanonicalName = "docker.io/" + repoInfo.RemoteName
<ide> } else {
<del> // *TODO: Decouple index name from hostname (via registry configuration?)
<ide> repoInfo.LocalName = repoInfo.Index.Name + "/" + repoInfo.RemoteName
<ide> repoInfo.CanonicalName = repoInfo.LocalName
<ide>
<ide><path>registry/registry_test.go
<ide> func TestParseRepositoryInfo(t *testing.T) {
<ide> },
<ide> RemoteName: "fooo/bar",
<ide> LocalName: "fooo/bar",
<del> CanonicalName: "fooo/bar",
<add> CanonicalName: "docker.io/fooo/bar",
<ide> Official: false,
<ide> },
<ide> "library/ubuntu": {
<ide> func TestParseRepositoryInfo(t *testing.T) {
<ide> },
<ide> RemoteName: "library/ubuntu",
<ide> LocalName: "ubuntu",
<del> CanonicalName: "ubuntu",
<add> CanonicalName: "docker.io/library/ubuntu",
<ide> Official: true,
<ide> },
<ide> "nonlibrary/ubuntu": {
<ide> func TestParseRepositoryInfo(t *testing.T) {
<ide> },
<ide> RemoteName: "nonlibrary/ubuntu",
<ide> LocalName: "nonlibrary/ubuntu",
<del> CanonicalName: "nonlibrary/ubuntu",
<add> CanonicalName: "docker.io/nonlibrary/ubuntu",
<ide> Official: false,
<ide> },
<ide> "ubuntu": {
<ide> func TestParseRepositoryInfo(t *testing.T) {
<ide> },
<ide> RemoteName: "library/ubuntu",
<ide> LocalName: "ubuntu",
<del> CanonicalName: "ubuntu",
<add> CanonicalName: "docker.io/library/ubuntu",
<ide> Official: true,
<ide> },
<ide> "other/library": {
<ide> func TestParseRepositoryInfo(t *testing.T) {
<ide> },
<ide> RemoteName: "other/library",
<ide> LocalName: "other/library",
<del> CanonicalName: "other/library",
<add> CanonicalName: "docker.io/other/library",
<ide> Official: false,
<ide> },
<ide> "127.0.0.1:8000/private/moonbase": {
<ide> func TestParseRepositoryInfo(t *testing.T) {
<ide> },
<ide> RemoteName: "public/moonbase",
<ide> LocalName: "public/moonbase",
<del> CanonicalName: "public/moonbase",
<add> CanonicalName: "docker.io/public/moonbase",
<ide> Official: false,
<ide> },
<ide> "index." + IndexServerName() + "/public/moonbase": {
<ide> func TestParseRepositoryInfo(t *testing.T) {
<ide> },
<ide> RemoteName: "public/moonbase",
<ide> LocalName: "public/moonbase",
<del> CanonicalName: "public/moonbase",
<add> CanonicalName: "docker.io/public/moonbase",
<ide> Official: false,
<ide> },
<ide> IndexServerName() + "/public/moonbase": {
<ide> func TestParseRepositoryInfo(t *testing.T) {
<ide> },
<ide> RemoteName: "public/moonbase",
<ide> LocalName: "public/moonbase",
<del> CanonicalName: "public/moonbase",
<add> CanonicalName: "docker.io/public/moonbase",
<ide> Official: false,
<ide> },
<ide> "ubuntu-12.04-base": {
<ide> func TestParseRepositoryInfo(t *testing.T) {
<ide> },
<ide> RemoteName: "library/ubuntu-12.04-base",
<ide> LocalName: "ubuntu-12.04-base",
<del> CanonicalName: "ubuntu-12.04-base",
<add> CanonicalName: "docker.io/library/ubuntu-12.04-base",
<ide> Official: true,
<ide> },
<ide> IndexServerName() + "/ubuntu-12.04-base": {
<ide> func TestParseRepositoryInfo(t *testing.T) {
<ide> },
<ide> RemoteName: "library/ubuntu-12.04-base",
<ide> LocalName: "ubuntu-12.04-base",
<del> CanonicalName: "ubuntu-12.04-base",
<add> CanonicalName: "docker.io/library/ubuntu-12.04-base",
<ide> Official: true,
<ide> },
<ide> IndexServerName() + "/ubuntu-12.04-base": {
<ide> func TestParseRepositoryInfo(t *testing.T) {
<ide> },
<ide> RemoteName: "library/ubuntu-12.04-base",
<ide> LocalName: "ubuntu-12.04-base",
<del> CanonicalName: "ubuntu-12.04-base",
<add> CanonicalName: "docker.io/library/ubuntu-12.04-base",
<ide> Official: true,
<ide> },
<ide> "index." + IndexServerName() + "/ubuntu-12.04-base": {
<ide> func TestParseRepositoryInfo(t *testing.T) {
<ide> },
<ide> RemoteName: "library/ubuntu-12.04-base",
<ide> LocalName: "ubuntu-12.04-base",
<del> CanonicalName: "ubuntu-12.04-base",
<add> CanonicalName: "docker.io/library/ubuntu-12.04-base",
<ide> Official: true,
<ide> },
<ide> } | 3 |
PHP | PHP | remove useless imports. | 9820e484d0d01bd9f0d4a194e1a614c8ead5541f | <ide><path>src/Illuminate/Translation/Translator.php
<ide>
<ide> namespace Illuminate\Translation;
<ide>
<del>use Countable;
<ide> use Illuminate\Contracts\Translation\Loader;
<ide> use Illuminate\Contracts\Translation\Translator as TranslatorContract;
<ide> use Illuminate\Support\Arr; | 1 |
Ruby | Ruby | fix typo in deprecation message. [robin dupret] | 70b7e281deeb08f20b2254fd779ddfa0945358f2 | <ide><path>activemodel/lib/active_model/validations/length.rb
<ide> def initialize(options)
<ide> if options[:tokenizer]
<ide> ActiveSupport::Deprecation.warn(<<-EOS.strip_heredoc)
<ide> The `:tokenizer` option is deprecated, and will be removed in Rails 5.1.
<del> You can achieve the same functionality be defining an instance method
<add> You can achieve the same functionality by defining an instance method
<ide> with the value that you want to validate the length of. For example,
<ide>
<ide> validates_length_of :essay, minimum: 100, | 1 |
PHP | PHP | improve doc blocks a bit | 1cd457b11060b890df835c8e74e036f37e486c12 | <ide><path>Cake/ORM/Association/ExternalAssociationTrait.php
<ide> protected function _buildQuery($options) {
<ide> * @return \Cake\ORM\Query
<ide> */
<ide> protected function _addFilteringCondition($query, $key, $filter) {
<del> return $query->andWhere([$key . ' in' => $filter]);
<add> return $query->andWhere([$key . ' IN' => $filter]);
<ide> }
<ide>
<ide> /**
<ide><path>Cake/ORM/Query.php
<ide> protected function _decorateResults($statement) {
<ide>
<ide> /**
<ide> * Applies some defaults to the query object before it is executed.
<del> * Specifically add the FROM clause, adds default table fields if none is
<add> *
<add> * Specifically add the FROM clause, adds default table fields if none are
<ide> * specified and applies the joins required to eager load associations defined
<ide> * using `contain`
<ide> *
<add> * @see Cake\Database\Query::execute()
<ide> * @return Query
<ide> */
<ide> protected function _transformQuery() { | 2 |
Ruby | Ruby | extract template keys to a method | dc1633090e6689d53e688c7bc8357ac3b31f4c70 | <ide><path>actionview/lib/action_view/renderer/partial_renderer.rb
<ide> def initialize(*)
<ide> @context_prefix = @lookup_context.prefixes.first
<ide> end
<ide>
<add> def template_keys
<add> if @has_object || @collection
<add> @locals.keys + retrieve_variable(@path, @as)
<add> else
<add> @locals.keys
<add> end
<add> end
<add>
<ide> def render(context, options, block)
<del> as = as_variable(options)
<add> as = @as = as_variable(options)
<ide> setup(context, options, as, block)
<ide>
<ide> if @path
<ide> @variable = nil
<ide> @variable_counter = nil
<ide> @variable_iteration = nil
<del> @template_keys = @locals.keys
<ide>
<ide> if @has_object || @collection
<ide> @variable, @variable_counter, @variable_iteration = retrieve_variable(@path, as)
<del> @template_keys << @variable
<del>
<del> if @collection
<del> @template_keys << @variable_counter
<del> @template_keys << @variable_iteration
<del> end
<ide> end
<ide>
<del> template = find_template(@path, @template_keys)
<add> template = find_template(@path, template_keys)
<ide> @variable ||= template.variable
<ide> else
<ide> if options[:cached]
<ide> def render_partial(view, template)
<ide> object, as = @object, @variable
<ide>
<ide> if !block && (layout = @options[:layout])
<del> layout = find_template(layout.to_s, @template_keys)
<add> layout = find_template(layout.to_s, template_keys)
<ide> end
<ide>
<ide> object = locals[as] if object.nil? # Respect object when object is false
<ide> def collection_with_template(view, template)
<ide> as, counter, iteration = @variable, @variable_counter, @variable_iteration
<ide>
<ide> if layout = @options[:layout]
<del> layout = find_template(layout, @template_keys)
<add> layout = find_template(layout, template_keys)
<ide> end
<ide>
<ide> partial_iteration = PartialIteration.new(@collection.size) | 1 |
Text | Text | add examples of creating static and dynamic arrays | 77509c6d234fb90b5d7e390d1ea134ffad353a99 | <ide><path>guide/english/cplusplus/arrays/index.md
<ide> There are two types of arrays based on way the array is declared.
<ide> Those arrays whose size is defined before compile time like in the examples above, are called static arrays. In these arrays we can't change their size, once they are declared.
<ide> e.g : int numbers [5];
<ide>
<add>```cpp
<add>//The number of elements must be specified at compile time.
<add>int fixed[5] = {1, 2, 3, 4, 5};
<add>```
<add>
<ide> **2**. Dynamic array:
<ide> Dynamic arrays are those arrays, whose size is not known at compile time and we can define their size at run time as need arises. These arrays are created by using **new** keyword and a pointer variable which points to the newly allocated memory location of the array. We can also free up the memory allocated once the array is not required anymore by using the **delete** keyword.
<ide>
<ide> e.g :
<ide> int * numbers = new int[5];
<ide> ```
<ide>
<add>```cpp
<add>int size;
<add>std::cout << "Enter array size: ";
<add>std::cin >> size;
<add>//The number of elements can be specified at runtime.
<add>int *dynamic = new int[size];
<add>
<add>//Dynamic arrays must be deleted when they are no longer required.
<add>delete[] dynamic;
<add>```
<add>
<ide> ### Access:
<add>
<ide> Elements of an array are accessed using their index. The index of the first element in the array is zero and the second element's index is 1 and so on. You can think of the index of an element as the unit "distance" from the beginning of the array, that is the first element is 0 units from the start.
<ide> Examples using the number array from above:
<ide> ```cpp | 1 |
PHP | PHP | fix typo in session/store | cc889335728166f2ad2bffad6e75095cdf70e7ba | <ide><path>src/Illuminate/Session/Store.php
<ide> public function exists($key)
<ide> }
<ide>
<ide> /**
<del> * Checks if an a key is present and not null.
<add> * Checks if a key is present and not null.
<ide> *
<ide> * @param string|array $key
<ide> * @return bool | 1 |
Python | Python | set version to v3.0.0a15 | 5d09e3e15459d4ad0ec25ef7e0b5b3264eb54dc8 | <ide><path>spacy/about.py
<ide> # fmt: off
<ide> __title__ = "spacy-nightly"
<del>__version__ = "3.0.0a14"
<add>__version__ = "3.0.0a15"
<ide> __release__ = True
<ide> __download_url__ = "https://github.com/explosion/spacy-models/releases/download"
<ide> __compatibility__ = "https://raw.githubusercontent.com/explosion/spacy-models/master/compatibility.json" | 1 |
Text | Text | add section on how to raise security issues | 79411a511e8489dc04405e9138d1abf56f69ba68 | <ide><path>.github/ISSUE_TEMPLATE.md
<ide> <!-- NOTE: If your issue is CodePen Project / Test Suite related, please open it using the below URL instead -->
<ide> <!-- https://github.com/freeCodeCamp/testable-projects-fcc/issues/new -->
<ide>
<add>#### Security
<add>Trying to report a security issue?
<add>
<add>👌 please report security issues to security@freecodecamp.org instead of raising a Github issue. We look forward to working with you. If the issue is significant we'll work on resolving it as quickly as we can. We'll be happy to mention you in a published list of security researchers that found issues in our projects if you so desire.
<add>
<add>
<ide> #### Challenge Name
<ide> <!-- Insert link to challenge below -->
<ide> | 1 |
Javascript | Javascript | restrict supported extensions | 147ea5e3d78effd6da8ea2bfbf6bb77b6aaf52da | <ide><path>lib/internal/errors.js
<ide> E('ERR_WORKER_NEED_ABSOLUTE_PATH',
<ide> TypeError);
<ide> E('ERR_WORKER_UNSERIALIZABLE_ERROR',
<ide> 'Serializing an uncaught exception failed', Error);
<add>E('ERR_WORKER_UNSUPPORTED_EXTENSION',
<add> 'The worker script extension must be ".js" or ".mjs". Received "%s"',
<add> TypeError);
<ide> E('ERR_ZLIB_INITIALIZATION_FAILED', 'Initialization failed', Error);
<ide><path>lib/internal/worker.js
<ide> const util = require('util');
<ide> const {
<ide> ERR_INVALID_ARG_TYPE,
<ide> ERR_WORKER_NEED_ABSOLUTE_PATH,
<del> ERR_WORKER_UNSERIALIZABLE_ERROR
<add> ERR_WORKER_UNSERIALIZABLE_ERROR,
<add> ERR_WORKER_UNSUPPORTED_EXTENSION,
<ide> } = require('internal/errors').codes;
<ide>
<ide> const { internalBinding } = require('internal/bootstrap/loaders');
<ide> class Worker extends EventEmitter {
<ide> throw new ERR_INVALID_ARG_TYPE('filename', 'string', filename);
<ide> }
<ide>
<del> if (!options.eval && !path.isAbsolute(filename)) {
<del> throw new ERR_WORKER_NEED_ABSOLUTE_PATH(filename);
<add> if (!options.eval) {
<add> if (!path.isAbsolute(filename)) {
<add> throw new ERR_WORKER_NEED_ABSOLUTE_PATH(filename);
<add> }
<add> const ext = path.extname(filename);
<add> if (ext !== '.js' && ext !== '.mjs') {
<add> throw new ERR_WORKER_UNSUPPORTED_EXTENSION(ext);
<add> }
<ide> }
<ide>
<ide> // Set up the C++ handle for the worker, as well as some internal wiring.
<ide><path>test/parallel/test-worker-unsupported-path.js
<add>// Flags: --experimental-worker
<add>'use strict';
<add>
<add>const common = require('../common');
<add>const assert = require('assert');
<add>const { Worker } = require('worker');
<add>
<add>{
<add> const expectedErr = common.expectsError({
<add> code: 'ERR_WORKER_NEED_ABSOLUTE_PATH',
<add> type: TypeError
<add> }, 4);
<add> assert.throws(() => { new Worker('a.js'); }, expectedErr);
<add> assert.throws(() => { new Worker('b'); }, expectedErr);
<add> assert.throws(() => { new Worker('c/d.js'); }, expectedErr);
<add> assert.throws(() => { new Worker('a.mjs'); }, expectedErr);
<add>}
<add>
<add>{
<add> const expectedErr = common.expectsError({
<add> code: 'ERR_WORKER_UNSUPPORTED_EXTENSION',
<add> type: TypeError
<add> }, 3);
<add> assert.throws(() => { new Worker('/b'); }, expectedErr);
<add> assert.throws(() => { new Worker('/c.wasm'); }, expectedErr);
<add> assert.throws(() => { new Worker('/d.txt'); }, expectedErr);
<add>} | 3 |
Ruby | Ruby | use sql literal factory method | 0a609eea504f72baead7548d47f0fe707314a033 | <ide><path>activerecord/lib/active_record/association_preload.rb
<ide> def find_associated_records(ids, reflection, preload_options)
<ide> append_conditions(reflection, preload_options)).join(' AND ')
<ide>
<ide> find_options = {
<del> :select => preload_options[:select] || options[:select] || Arel::SqlLiteral.new("#{table_name}.*"),
<add> :select => preload_options[:select] || options[:select] || Arel.sql("#{table_name}.*"),
<ide> :include => preload_options[:include] || options[:include],
<ide> :joins => options[:joins],
<ide> :group => preload_options[:group] || options[:group], | 1 |
Javascript | Javascript | remove unnecessary set of default_max_version | c925d1dcaa13e23ae51769c8e9a20e9141a643a4 | <ide><path>lib/tls.js
<ide> exports.DEFAULT_CIPHERS =
<ide>
<ide> exports.DEFAULT_ECDH_CURVE = 'auto';
<ide>
<del>exports.DEFAULT_MAX_VERSION = 'TLSv1.3';
<del>
<ide> if (getOptionValue('--tls-min-v1.0'))
<ide> exports.DEFAULT_MIN_VERSION = 'TLSv1';
<ide> else if (getOptionValue('--tls-min-v1.1')) | 1 |
Ruby | Ruby | reset migration version before testing migration | e9e7e7d3fd70fb31224b9d1234b04cf41249ff2d | <ide><path>activerecord/test/cases/adapters/postgresql/uuid_test.rb
<ide> def test_schema_dumper_for_uuid_primary_key_default
<ide>
<ide> uses_transaction \
<ide> def test_schema_dumper_for_uuid_primary_key_default_in_legacy_migration
<add> ActiveRecord::SchemaMigration.delete_all
<ide> @verbose_was = ActiveRecord::Migration.verbose
<ide> ActiveRecord::Migration.verbose = false
<ide>
<ide> def test_schema_dumper_for_uuid_primary_key_with_default_override_via_nil
<ide>
<ide> uses_transaction \
<ide> def test_schema_dumper_for_uuid_primary_key_with_default_nil_in_legacy_migration
<add> ActiveRecord::SchemaMigration.delete_all
<ide> @verbose_was = ActiveRecord::Migration.verbose
<ide> ActiveRecord::Migration.verbose = false
<ide> | 1 |
Text | Text | ignore no-literal-urls in readme | 0619b600b2ff66be75e66ef59184c46301e5273f | <ide><path>README.md
<add><!--lint disable no-literal-urls-->
<ide> <p align="center">
<ide> <a href="https://nodejs.org/">
<ide> <img | 1 |
Python | Python | add doc init from list of words and text | 3d2c308906e2bde7ca57d2e8213252530b944502 | <ide><path>spacy/errors.py
<ide> class Errors(object):
<ide> E193 = ("Unable to resize vectors in place if the resized vector dimension "
<ide> "({new_dim}) is not the same as the current vector dimension "
<ide> "({curr_dim}).")
<add> E194 = ("Unable to aligned mismatched text '{text}' and words '{words}'.")
<ide>
<ide>
<ide> @add_codes
<ide><path>spacy/tests/doc/test_creation.py
<ide> from spacy.tokens import Doc
<ide> from spacy.lemmatizer import Lemmatizer
<ide> from spacy.lookups import Lookups
<add>from spacy import util
<ide>
<ide>
<ide> @pytest.fixture
<ide> def test_lookup_lemmatization(vocab):
<ide> assert doc[0].lemma_ == "dog"
<ide> assert doc[1].text == "dogses"
<ide> assert doc[1].lemma_ == "dogses"
<add>
<add>
<add>def test_create_from_words_and_text(vocab):
<add> # no whitespace in words
<add> words = ["'", "dogs", "'", "run"]
<add> text = " 'dogs'\n\nrun "
<add> (words, spaces) = util.get_words_and_spaces(words, text)
<add> doc = Doc(vocab, words=words, spaces=spaces)
<add> assert [t.text for t in doc] == [" ", "'", "dogs", "'", "\n\n", "run", " "]
<add> assert [t.whitespace_ for t in doc] == ["", "", "", "", "", " ", ""]
<add> assert doc.text == text
<add> assert [t.text for t in doc if not t.text.isspace()] == [word for word in words if not word.isspace()]
<add>
<add> # partial whitespace in words
<add> words = [" ", "'", "dogs", "'", "\n\n", "run", " "]
<add> text = " 'dogs'\n\nrun "
<add> (words, spaces) = util.get_words_and_spaces(words, text)
<add> doc = Doc(vocab, words=words, spaces=spaces)
<add> assert [t.text for t in doc] == [" ", "'", "dogs", "'", "\n\n", "run", " "]
<add> assert [t.whitespace_ for t in doc] == ["", "", "", "", "", " ", ""]
<add> assert doc.text == text
<add> assert [t.text for t in doc if not t.text.isspace()] == [word for word in words if not word.isspace()]
<add>
<add> # non-standard whitespace tokens
<add> words = [" ", " ", "'", "dogs", "'", "\n\n", "run"]
<add> text = " 'dogs'\n\nrun "
<add> (words, spaces) = util.get_words_and_spaces(words, text)
<add> doc = Doc(vocab, words=words, spaces=spaces)
<add> assert [t.text for t in doc] == [" ", "'", "dogs", "'", "\n\n", "run", " "]
<add> assert [t.whitespace_ for t in doc] == ["", "", "", "", "", " ", ""]
<add> assert doc.text == text
<add> assert [t.text for t in doc if not t.text.isspace()] == [word for word in words if not word.isspace()]
<add>
<add> # mismatch between words and text
<add> with pytest.raises(ValueError):
<add> words = [" ", " ", "'", "dogs", "'", "\n\n", "run"]
<add> text = " 'dogs'\n\nrun "
<add> (words, spaces) = util.get_words_and_spaces(words + ["away"], text)
<ide><path>spacy/util.py
<ide> def get_serialization_exclude(serializers, exclude, kwargs):
<ide> return exclude
<ide>
<ide>
<add>def get_words_and_spaces(words, text):
<add> if "".join("".join(words).split())!= "".join(text.split()):
<add> raise ValueError(Errors.E194.format(text=text, words=words))
<add> text_words = []
<add> text_spaces = []
<add> text_pos = 0
<add> # normalize words to remove all whitespace tokens
<add> norm_words = [word for word in words if not word.isspace()]
<add> # align words with text
<add> for word in norm_words:
<add> try:
<add> word_start = text[text_pos:].index(word)
<add> except ValueError:
<add> raise ValueError(Errors.E194.format(text=text, words=words))
<add> if word_start > 0:
<add> text_words.append(text[text_pos:text_pos+word_start])
<add> text_spaces.append(False)
<add> text_pos += word_start
<add> text_words.append(word)
<add> text_spaces.append(False)
<add> text_pos += len(word)
<add> if text_pos < len(text) and text[text_pos] == " ":
<add> text_spaces[-1] = True
<add> text_pos += 1
<add> if text_pos < len(text):
<add> text_words.append(text[text_pos:])
<add> text_spaces.append(False)
<add> return (text_words, text_spaces)
<add>
<add>
<ide> class SimpleFrozenDict(dict):
<ide> """Simplified implementation of a frozen dict, mainly used as default
<ide> function or method argument (for arguments that should default to empty | 3 |
Python | Python | remove named templates from the output | 16331cc1cf9b2cf3505d0c4742a854dc00923d9d | <ide><path>numpy/distutils/from_template.py
<ide> def find_repl_patterns(astr):
<ide> names[name] = thelist
<ide> return names
<ide>
<add>def find_and_remove_repl_patterns(astr):
<add> names = find_repl_patterns(astr)
<add> astr = re.subn(named_re, '', astr)[0]
<add> return astr, names
<add>
<ide> item_re = re.compile(r"\A\\(?P<index>\d+)\Z")
<ide> def conv(astr):
<ide> b = astr.split(',')
<ide> def process_str(allstr):
<ide> names = {}
<ide> names.update(_special_names)
<ide> for sub in struct:
<del> writestr += newstr[oldend:sub[0]]
<del> names.update(find_repl_patterns(newstr[oldend:sub[0]]))
<add> cleanedstr, defs = find_and_remove_repl_patterns(newstr[oldend:sub[0]])
<add> writestr += cleanedstr
<add> names.update(defs)
<ide> writestr += expand_sub(newstr[sub[0]:sub[1]], names)
<ide> oldend = sub[1]
<ide> writestr += newstr[oldend:]
<ide><path>numpy/distutils/tests/test_from_template.py
<add>
<add>from numpy.distutils.from_template import process_str
<add>from numpy.testing import assert_equal, run_module_suite
<add>
<add>
<add>pyf_src = """
<add>python module foo
<add> <_rd=real,double precision>
<add> interface
<add> subroutine <s,d>foosub(tol)
<add> <_rd>, intent(in,out) :: tol
<add> end subroutine <s,d>foosub
<add> end interface
<add>end python module foo
<add>"""
<add>
<add>expected_pyf = """
<add>python module foo
<add> interface
<add> subroutine sfoosub(tol)
<add> real, intent(in,out) :: tol
<add> end subroutine sfoosub
<add> subroutine dfoosub(tol)
<add> double precision, intent(in,out) :: tol
<add> end subroutine dfoosub
<add> end interface
<add>end python module foo
<add>"""
<add>
<add>
<add>def normalize_whitespace(s):
<add> """
<add> Remove leading and trailing whitespace, and convert internal
<add> stretches of whitespace to a single space.
<add> """
<add> return ' '.join(s.split())
<add>
<add>
<add>def test_from_template():
<add> """Regression test for gh-10712."""
<add> pyf = process_str(pyf_src)
<add> normalized_pyf = normalize_whitespace(pyf)
<add> normalized_expected_pyf = normalize_whitespace(expected_pyf)
<add> assert_equal(normalized_pyf, normalized_expected_pyf)
<add>
<add>
<add>if __name__ == "__main__":
<add> run_module_suite() | 2 |
PHP | PHP | update language domains | d2badbeb22d2e557746f5d2f3d06480ab2ff0bdb | <ide><path>lib/Cake/View/Helper.php
<ide> public function __construct(View $View, $settings = array()) {
<ide> * @param array $params Array of params for the method.
<ide> */
<ide> public function __call($method, $params) {
<del> trigger_error(__d('cake', 'Method %1$s::%2$s does not exist', get_class($this), $method), E_USER_WARNING);
<add> trigger_error(__d('cake_error', 'Method %1$s::%2$s does not exist', get_class($this), $method), E_USER_WARNING);
<ide> }
<ide>
<ide> /**
<ide><path>lib/Cake/View/Helper/FormHelper.php
<ide> function create($model = null, $options = array()) {
<ide> $options);
<ide> $this->_inputDefaults = $options['inputDefaults'];
<ide> unset($options['inputDefaults']);
<del>
<add>
<ide> if (!isset($options['id'])) {
<ide> $domId = isset($options['action']) ? $options['action'] : $this->request['action'];
<ide> $options['id'] = $this->domId($domId . 'Form');
<ide> }
<del>
<add>
<ide> if ($options['action'] === null && $options['url'] === null) {
<ide> $options['action'] = $this->request->here(false);
<ide> } elseif (empty($options['url']) || is_array($options['url'])) {
<ide> public function inputs($fields = null, $blacklist = null) {
<ide> $actionName = __d('cake', 'Edit %s');
<ide> }
<ide> $modelName = Inflector::humanize(Inflector::underscore($model));
<del> $legend = sprintf($actionName, __d('cake', $modelName));
<add> $legend = sprintf($actionName, __($modelName));
<ide> }
<ide>
<ide> $out = null;
<ide> public function radio($fieldName, $options = array(), $attributes = array()) {
<ide> $legend = $attributes['legend'];
<ide> unset($attributes['legend']);
<ide> } elseif (count($options) > 1) {
<del> $legend = __d('cake', Inflector::humanize($this->field()));
<add> $legend = __(Inflector::humanize($this->field()));
<ide> }
<ide> $label = true;
<ide>
<ide> public function radio($fieldName, $options = array(), $attributes = array()) {
<ide> }
<ide>
<ide> /**
<del> * Missing method handler - implements various simple input types. Is used to create inputs
<add> * Missing method handler - implements various simple input types. Is used to create inputs
<ide> * of various types. e.g. `$this->Form->text();` will create `<input type="text" />` while
<ide> * `$this->Form->range();` will create `<input type="range" />`
<ide> *
<ide> public function radio($fieldName, $options = array(), $attributes = array()) {
<ide> public function __call($method, $params) {
<ide> $options = array();
<ide> if (empty($params)) {
<del> throw new CakeException(__d('cake', 'Missing field name for FormHelper::%s', $method));
<add> throw new CakeException(__d('cake_error', 'Missing field name for FormHelper::%s', $method));
<ide> }
<ide> if (isset($params[1])) {
<ide> $options = $params[1];
<ide> public function select($fieldName, $options = array(), $attributes = array()) {
<ide> $style = null;
<ide> $tag = null;
<ide> $attributes += array(
<del> 'class' => null,
<add> 'class' => null,
<ide> 'escape' => true,
<ide> 'secure' => null,
<ide> 'empty' => '',
<ide> protected function _name($options = array(), $field = null, $key = 'name') {
<ide> function __selectOptions($elements = array(), $parents = array(), $showParents = null, $attributes = array()) {
<ide> $select = array();
<ide> $attributes = array_merge(
<del> array('escape' => true, 'style' => null, 'value' => null, 'class' => null),
<add> array('escape' => true, 'style' => null, 'value' => null, 'class' => null),
<ide> $attributes
<ide> );
<ide> $selectedIsEmpty = ($attributes['value'] === '' || $attributes['value'] === null);
<ide> protected function _initInputField($field, $options = array()) {
<ide> } else {
<ide> $secure = (isset($this->request['_Token']) && !empty($this->request['_Token']));
<ide> }
<del>
<add>
<ide> $fieldName = null;
<ide> if ($secure && !empty($options['name'])) {
<ide> preg_match_all('/\[(.*?)\]/', $options['name'], $matches);
<ide> protected function _initInputField($field, $options = array()) {
<ide> }
<ide> return $result;
<ide> }
<del>}
<add>}
<ide>\ No newline at end of file
<ide><path>lib/Cake/View/Helper/HtmlHelper.php
<ide> public function loadConfig($configFile, $path = CONFIGS) {
<ide> $reader = $configFile[1];
<ide> }
<ide> } else {
<del> throw new ConfigureException(__d('cake', 'Cannot load the configuration file. Wrong "configFile" configuration.'));
<add> throw new ConfigureException(__d('cake_error', 'Cannot load the configuration file. Wrong "configFile" configuration.'));
<ide> }
<ide>
<ide> $readerClass = Inflector::camelize($reader) . 'Reader';
<ide> App::uses($readerClass, 'Configure');
<ide> if (!class_exists($readerClass)) {
<del> throw new ConfigureException(__d('cake', 'Cannot load the configuration file. Unknown reader.'));
<add> throw new ConfigureException(__d('cake_error', 'Cannot load the configuration file. Unknown reader.'));
<ide> }
<ide>
<ide> $readerObj = new $readerClass($path);
<ide><path>lib/Cake/View/Helper/JsHelper.php
<ide> public function __call($method, $params) {
<ide> if (method_exists($this, $method . '_')) {
<ide> return call_user_func(array(&$this, $method . '_'), $params);
<ide> }
<del> trigger_error(__d('cake', 'JsHelper:: Missing Method %s is undefined', $method), E_USER_WARNING);
<add> trigger_error(__d('cake_error', 'JsHelper:: Missing Method %s is undefined', $method), E_USER_WARNING);
<ide> }
<ide>
<ide> /**
<ide><path>lib/Cake/View/Helper/MootoolsEngineHelper.php
<ide> function drag($options = array()) {
<ide> function drop($options = array()) {
<ide> if (empty($options['drag'])) {
<ide> trigger_error(
<del> __d('cake', 'MootoolsEngine::drop() requires a "drag" option to properly function'), E_USER_WARNING
<add> __d('cake_error', 'MootoolsEngine::drop() requires a "drag" option to properly function'), E_USER_WARNING
<ide> );
<ide> return false;
<ide> }
<ide><path>lib/Cake/View/Helper/NumberHelper.php
<ide> public function format($number, $options = false) {
<ide> * ### Options
<ide> *
<ide> * - `before` - The currency symbol to place before whole numbers ie. '$'
<del> * - `after` - The currency symbol to place after decimal numbers ie. 'c'. Set to boolean false to
<add> * - `after` - The currency symbol to place after decimal numbers ie. 'c'. Set to boolean false to
<ide> * use no decimal symbol. eg. 0.35 => $0.35.
<ide> * - `zero` - The text to use for zero values, can be a string or a number. ie. 0, 'Free!'
<ide> * - `places` - Number of decimal places to use. ie. 2
<ide> public function currency($number, $currency = 'USD', $options = array()) {
<ide> }
<ide> }
<ide>
<del> $position = $options[$symbolKey.'Position'] != 'after' ? 'before' : 'after';
<add> $position = $options[$symbolKey.'Position'] != 'after' ? 'before' : 'after';
<ide> $options[$position] = $options[$symbolKey.'Symbol'];
<ide>
<ide> $abs = abs($number);
<ide> public function currency($number, $currency = 'USD', $options = array()) {
<ide> * currency formats easier.
<ide> *
<ide> * {{{ $number->addFormat('NOK', array('before' => 'Kr. ')); }}}
<del> *
<add> *
<ide> * You can now use `NOK` as a shortform when formatting currency amounts.
<ide> *
<ide> * {{{ $number->currency($value, 'NOK'); }}}
<ide> public function addFormat($formatName, $options) {
<ide> $this->_currencies[$formatName] = $options + $this->_currencyDefaults;
<ide> }
<ide>
<del>}
<add>}
<ide>\ No newline at end of file
<ide><path>lib/Cake/View/Helper/PaginatorHelper.php
<ide> class PaginatorHelper extends AppHelper {
<ide> * - `escape` Defines if the title field for the link should be escaped (default: true).
<ide> * - `update` DOM id of the element updated with the results of the AJAX call.
<ide> * If this key isn't specified Paginator will use plain HTML links.
<del> * - `paging['paramType']` The type of parameters to use when creating links. Valid options are
<add> * - `paging['paramType']` The type of parameters to use when creating links. Valid options are
<ide> * 'querystring', 'named', and 'route'. See PaginatorComponent::$settings for more information.
<ide> * - `convertKeys` - A list of keys in url arrays that should be converted to querysting params
<ide> * if paramType == 'querystring'.
<ide> function __construct(View $View, $settings = array()) {
<ide> $classname = $ajaxProvider . 'Helper';
<ide> if (!method_exists($classname, 'link')) {
<ide> throw new CakeException(sprintf(
<del> __d('cake', '%s does not implement a link() method, it is incompatible with PaginatorHelper'), $classname
<add> __d('cake_error', '%s does not implement a link() method, it is incompatible with PaginatorHelper'), $classname
<ide> ));
<ide> }
<ide> parent::__construct($View, $settings);
<ide> public function sort($key, $title = null, $options = array()) {
<ide>
<ide> if (empty($title)) {
<ide> $title = $key;
<del> $title = __d('cake', Inflector::humanize(preg_replace('/_id$/', '', $title)));
<add> $title = __(Inflector::humanize(preg_replace('/_id$/', '', $title)));
<ide> }
<ide> $dir = isset($options['direction']) ? $options['direction'] : 'asc';
<ide> unset($options['direction']);
<ide>
<ide> $sortKey = $this->sortKey($options['model']);
<ide> $defaultModel = $this->defaultModel();
<ide> $isSorted = (
<del> $sortKey === $key ||
<add> $sortKey === $key ||
<ide> $sortKey === $defaultModel . '.' . $key ||
<ide> $key === $defaultModel . '.' . $sortKey
<ide> );
<ide> public function counter($options = array()) {
<ide>
<ide> /**
<ide> * Returns a set of numbers for the paged result set
<del> * uses a modulus to decide how many numbers to show on each side of the current page (default: 8).
<add> * uses a modulus to decide how many numbers to show on each side of the current page (default: 8).
<ide> *
<ide> * `$this->Paginator->numbers(array('first' => 2, 'last' => 2));`
<ide> *
<ide> * Using the first and last options you can create links to the beginning and end of the page set.
<del> *
<add> *
<ide> *
<ide> * ### Options
<ide> *
<ide> public function last($last = 'last >>', $options = array()) {
<ide> }
<ide> return $out;
<ide> }
<del>}
<add>}
<ide>\ No newline at end of file
<ide><path>lib/Cake/View/Helper/TimeHelper.php
<ide> public function i18nFormat($date, $format = null, $invalid = false, $userOffset
<ide> $format = $this->convertSpecifiers($format, $date);
<ide> return strftime($format, $date);
<ide> }
<del>}
<add>}
<ide>\ No newline at end of file
<ide><path>lib/Cake/View/View.php
<ide> public function render($view = null, $layout = null) {
<ide> $layout = $this->layout;
<ide> }
<ide> if ($this->output === false) {
<del> throw new CakeException(__d('cake', "Error in view %s, got no content.", $viewFileName));
<add> throw new CakeException(__d('cake_error', "Error in view %s, got no content.", $viewFileName));
<ide> }
<ide> if ($layout && $this->autoLayout) {
<ide> $this->output = $this->renderLayout($this->output, $layout);
<ide> public function renderLayout($content_for_layout, $layout = null) {
<ide> $this->output = $this->_render($layoutFileName);
<ide>
<ide> if ($this->output === false) {
<del> throw new CakeException(__d('cake', "Error in layout %s, got no content.", $layoutFileName));
<add> throw new CakeException(__d('cake_error', "Error in layout %s, got no content.", $layoutFileName));
<ide> }
<ide>
<ide> $this->Helpers->trigger('afterLayout', array($layoutFileName)); | 9 |
Javascript | Javascript | add bigint formatting to util.inspect | 39dc947409c7df19ac4f9502df8cb0d4a1b2829a | <ide><path>lib/util.js
<ide> inspect.colors = Object.assign(Object.create(null), {
<ide> inspect.styles = Object.assign(Object.create(null), {
<ide> 'special': 'cyan',
<ide> 'number': 'yellow',
<add> 'bigint': 'yellow',
<ide> 'boolean': 'yellow',
<ide> 'undefined': 'grey',
<ide> 'null': 'bold',
<ide> function formatPrimitive(fn, value, ctx) {
<ide> }
<ide> if (typeof value === 'number')
<ide> return formatNumber(fn, value);
<add> // eslint-disable-next-line valid-typeof
<add> if (typeof value === 'bigint')
<add> return fn(`${value}n`, 'bigint');
<ide> if (typeof value === 'boolean')
<ide> return fn(`${value}`, 'boolean');
<ide> if (typeof value === 'undefined')
<ide><path>test/parallel/test-util-inspect-bigint.js
<add>'use strict';
<add>
<add>// Flags: --harmony-bigint
<add>
<add>require('../common');
<add>const assert = require('assert');
<add>
<add>const { inspect } = require('util');
<add>
<add>assert.strictEqual(inspect(1n), '1n'); | 2 |
Javascript | Javascript | fix lint and deopt issues | 7d1d3a6621b9a6599b0ee139964afd6b2ca38c6d | <ide><path>lib/url.js
<ide> Url.prototype.parse = function(url, parseQueryString, slashesDenoteHost) {
<ide> var end = -1;
<ide> var rest = '';
<ide> var lastPos = 0;
<del> for (var i = 0, inWs = false, split = false; i < url.length; ++i) {
<del> var code = url.charCodeAt(i);
<add> var i = 0;
<add> for (var inWs = false, split = false; i < url.length; ++i) {
<add> const code = url.charCodeAt(i);
<ide>
<ide> // Find first and last non-whitespace characters for trimming
<del> var isWs = code === 32/* */ ||
<del> code === 9/*\t*/ ||
<del> code === 13/*\r*/ ||
<del> code === 10/*\n*/ ||
<del> code === 12/*\f*/ ||
<del> code === 160/*\u00A0*/ ||
<del> code === 65279/*\uFEFF*/;
<add> const isWs = code === 32/* */ ||
<add> code === 9/*\t*/ ||
<add> code === 13/*\r*/ ||
<add> code === 10/*\n*/ ||
<add> code === 12/*\f*/ ||
<add> code === 160/*\u00A0*/ ||
<add> code === 65279/*\uFEFF*/;
<ide> if (start === -1) {
<ide> if (isWs)
<ide> continue;
<ide> Url.prototype.parse = function(url, parseQueryString, slashesDenoteHost) {
<ide>
<ide> if (!slashesDenoteHost && !hasHash) {
<ide> // Try fast path regexp
<del> var simplePath = simplePathPattern.exec(rest);
<add> const simplePath = simplePathPattern.exec(rest);
<ide> if (simplePath) {
<ide> this.path = rest;
<ide> this.href = rest;
<ide> Url.prototype.parse = function(url, parseQueryString, slashesDenoteHost) {
<ide> var hostEnd = -1;
<ide> var atSign = -1;
<ide> var nonHost = -1;
<del> for (var i = 0; i < rest.length; ++i) {
<add> for (i = 0; i < rest.length; ++i) {
<ide> switch (rest.charCodeAt(i)) {
<ide> case 9: // '\t'
<ide> case 10: // '\n'
<ide> Url.prototype.parse = function(url, parseQueryString, slashesDenoteHost) {
<ide> if (hostEnd !== -1)
<ide> break;
<ide> }
<del> var start = 0;
<add> start = 0;
<ide> if (atSign !== -1) {
<ide> this.auth = decodeURIComponent(rest.slice(0, atSign));
<ide> start = atSign + 1;
<ide> Url.prototype.parse = function(url, parseQueryString, slashesDenoteHost) {
<ide> hostname.charCodeAt(hostname.length - 1) === 93/*]*/;
<ide>
<ide> // validate a little.
<del> var result;
<ide> if (!ipv6Hostname) {
<del> result = validateHostname(this, rest, hostname);
<add> const result = validateHostname(this, rest, hostname);
<ide> if (result !== undefined)
<ide> rest = result;
<ide> }
<ide> Url.prototype.parse = function(url, parseQueryString, slashesDenoteHost) {
<ide> // First, make 100% sure that any "autoEscape" chars get
<ide> // escaped, even if encodeURIComponent doesn't think they
<ide> // need to be.
<del> result = autoEscapeStr(rest);
<add> const result = autoEscapeStr(rest);
<ide> if (result !== undefined)
<ide> rest = result;
<ide> }
<ide>
<ide> var questionIdx = -1;
<ide> var hashIdx = -1;
<del> for (var i = 0; i < rest.length; ++i) {
<del> var code = rest.charCodeAt(i);
<add> for (i = 0; i < rest.length; ++i) {
<add> const code = rest.charCodeAt(i);
<ide> if (code === 35/*#*/) {
<ide> this.hash = rest.slice(i);
<ide> hashIdx = i;
<ide> Url.prototype.parse = function(url, parseQueryString, slashesDenoteHost) {
<ide>
<ide> // to support http.request
<ide> if (this.pathname || this.search) {
<del> var p = this.pathname || '';
<del> var s = this.search || '';
<add> const p = this.pathname || '';
<add> const s = this.search || '';
<ide> this.path = p + s;
<ide> }
<ide>
<ide> Url.prototype.resolveObject = function(relative) {
<ide> return result;
<ide> }
<ide>
<del> const isSourceAbs = (result.pathname && result.pathname.charAt(0) === '/');
<del> const isRelAbs = (
<add> var isSourceAbs = (result.pathname && result.pathname.charAt(0) === '/');
<add> var isRelAbs = (
<ide> relative.host ||
<ide> relative.pathname && relative.pathname.charAt(0) === '/'
<ide> );
<ide> var mustEndAbs = (isRelAbs || isSourceAbs ||
<ide> (result.host && relative.pathname));
<del> const removeAllDots = mustEndAbs;
<add> var removeAllDots = mustEndAbs;
<ide> var srcPath = result.pathname && result.pathname.split('/') || [];
<del> const relPath = relative.pathname && relative.pathname.split('/') || [];
<del> const psychotic = result.protocol && !slashedProtocol[result.protocol];
<add> var relPath = relative.pathname && relative.pathname.split('/') || [];
<add> var psychotic = result.protocol && !slashedProtocol[result.protocol];
<ide>
<ide> // if the url is a non-slashed url, then relative
<ide> // links like ../.. should be able | 1 |
Javascript | Javascript | add support for react.pure in reactdomserver | 8ecd4bd4f07ea41baa934e6748649e447c999231 | <ide><path>packages/react-dom/src/__tests__/ReactDOMServerIntegrationSpecialTypes-test.js
<ide> const ReactDOMServerIntegrationUtils = require('./utils/ReactDOMServerIntegratio
<ide> let React;
<ide> let ReactDOM;
<ide> let ReactDOMServer;
<add>let forwardRef;
<add>let pure;
<add>let yieldedValues;
<add>let yieldValue;
<add>let clearYields;
<ide>
<ide> function initModules() {
<ide> // Reset warning cache.
<ide> jest.resetModuleRegistry();
<ide> React = require('react');
<ide> ReactDOM = require('react-dom');
<ide> ReactDOMServer = require('react-dom/server');
<add> forwardRef = React.forwardRef;
<add> pure = React.pure;
<add>
<add> yieldedValues = [];
<add> yieldValue = value => {
<add> yieldedValues.push(value);
<add> };
<add> clearYields = () => {
<add> const ret = yieldedValues;
<add> yieldedValues = [];
<add> return ret;
<add> };
<ide>
<ide> // Make them available to the helpers.
<ide> return {
<ide> describe('ReactDOMServerIntegration', () => {
<ide> const FunctionComponent = ({label, forwardedRef}) => (
<ide> <div ref={forwardedRef}>{label}</div>
<ide> );
<del> const WrappedFunctionComponent = React.forwardRef((props, ref) => (
<add> const WrappedFunctionComponent = forwardRef((props, ref) => (
<ide> <FunctionComponent {...props} forwardedRef={ref} />
<ide> ));
<ide>
<ide> describe('ReactDOMServerIntegration', () => {
<ide> expect(div.tagName).toBe('DIV');
<ide> expect(div.textContent).toBe('Test');
<ide> });
<add>
<add> describe('pure functional components', () => {
<add> beforeEach(() => {
<add> resetModules();
<add> });
<add>
<add> function Text({text}) {
<add> yieldValue(text);
<add> return <span>{text}</span>;
<add> }
<add>
<add> function Counter({count}) {
<add> return <Text text={'Count: ' + count} />;
<add> }
<add>
<add> itRenders('basic render', async render => {
<add> const PureCounter = pure(Counter);
<add> const domNode = await render(<PureCounter count={0} />);
<add> expect(domNode.textContent).toEqual('Count: 0');
<add> });
<add>
<add> itRenders('composition with forwardRef', async render => {
<add> const RefCounter = (props, ref) => <Counter count={ref.current} />;
<add> const PureRefCounter = pure(forwardRef(RefCounter));
<add>
<add> const ref = React.createRef();
<add> ref.current = 0;
<add> await render(<PureRefCounter ref={ref} />);
<add>
<add> expect(clearYields()).toEqual(['Count: 0']);
<add> });
<add>
<add> itRenders('with comparator', async render => {
<add> const PureCounter = pure(Counter, (oldProps, newProps) => false);
<add> await render(<PureCounter count={0} />);
<add> expect(clearYields()).toEqual(['Count: 0']);
<add> });
<add>
<add> itRenders(
<add> 'comparator functions are not invoked on the server',
<add> async render => {
<add> const PureCounter = React.pure(Counter, (oldProps, newProps) => {
<add> yieldValue(
<add> `Old count: ${oldProps.count}, New count: ${newProps.count}`,
<add> );
<add> return oldProps.count === newProps.count;
<add> });
<add>
<add> await render(<PureCounter count={0} />);
<add> expect(clearYields()).toEqual(['Count: 0']);
<add> },
<add> );
<add> });
<ide> });
<ide><path>packages/react-dom/src/server/ReactPartialRenderer.js
<ide> import {
<ide> REACT_PROVIDER_TYPE,
<ide> REACT_CONTEXT_TYPE,
<ide> REACT_LAZY_TYPE,
<add> REACT_PURE_TYPE,
<ide> } from 'shared/ReactSymbols';
<ide>
<ide> import {
<ide> class ReactDOMServerRenderer {
<ide> this.stack.push(frame);
<ide> return '';
<ide> }
<add> case REACT_PURE_TYPE: {
<add> const element: ReactElement = ((nextChild: any): ReactElement);
<add> let nextChildren = [
<add> React.createElement(
<add> elementType.type,
<add> Object.assign({ref: element.ref}, element.props),
<add> ),
<add> ];
<add> const frame: Frame = {
<add> type: null,
<add> domNamespace: parentNamespace,
<add> children: nextChildren,
<add> childIndex: 0,
<add> context: context,
<add> footer: '',
<add> };
<add> if (__DEV__) {
<add> ((frame: any): FrameDev).debugElementStack = [];
<add> }
<add> this.stack.push(frame);
<add> return '';
<add> }
<ide> case REACT_PROVIDER_TYPE: {
<ide> const provider: ReactProvider<any> = (nextChild: any);
<ide> const nextProps = provider.props; | 2 |
Go | Go | send sigterm to child instead of sigkill | 39037a91f85a4a072e5aa7e585d8c2f6b211df8a | <ide><path>pkg/libcontainer/nsinit/init.go
<ide> func (ns *linuxNs) Init(container *libcontainer.Container, uncleanRootfs, consol
<ide> return fmt.Errorf("setctty %s", err)
<ide> }
<ide> }
<del> if err := system.ParentDeathSignal(); err != nil {
<add> // this is our best effort to let the process know that the parent has died and that it
<add> // should it should act on it how it sees fit
<add> if err := system.ParentDeathSignal(uintptr(syscall.SIGTERM)); err != nil {
<ide> return fmt.Errorf("parent death signal %s", err)
<ide> }
<ide> if err := setupNewMountNamespace(rootfs, container.Mounts, console, container.ReadonlyFs, container.NoPivotRoot); err != nil {
<ide><path>pkg/system/calls_linux.go
<ide> func Mknod(path string, mode uint32, dev int) error {
<ide> return syscall.Mknod(path, mode, dev)
<ide> }
<ide>
<del>func ParentDeathSignal() error {
<del> if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_PDEATHSIG, uintptr(syscall.SIGKILL), 0); err != 0 {
<add>func ParentDeathSignal(sig uintptr) error {
<add> if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_PDEATHSIG, sig, 0); err != 0 {
<ide> return err
<ide> }
<ide> return nil | 2 |
Text | Text | add inline code | 202ca8efc2c9a28e4ac4be1075cb568f57ef417a | <ide><path>guide/english/r/functions/index.md
<ide> A function allows you to define a reusable block of code that can be executed ma
<ide>
<ide> Functions can be named and called repeatedly or can be run anonymously in place (similar to lambda functions in python).
<ide>
<del>Developing full understanding of R functions requires understanding of environments.
<add>Developing a full understanding of R functions requires understanding of environments.
<ide> Environments are simply a way to manage objects. An example of environments in action is that you can use a redundant variable
<ide> name within a function, that won't be affected if the larger runtime already has the same variable. Additionally, if a
<ide> function calls a variable which is not defined within the function, it will check the higher level environment for that variable.
<ide> its factorial with the `factorial()`.
<ide> [1] 6
<ide> ```
<ide> - If you’re not sure which names to use with a function, you can look up the function’s
<del>arguments with args.
<add>arguments with `args()`. For example:
<ide>
<ide> ```r
<ide> > args(round)
<ide> [1] function(x, digits=0)
<ide> ```
<ide>
<del>## Resources
<del>
<del> * [Official Docs](https://cran.r-project.org/manuals.html)
<del> * [Quick-R](https://www.statmethods.net/management/functions.html)
<del> * [Advanced R: Functions](http://adv-r.had.co.nz/Functions.html)
<add>## Additional Resources
<add> - [Official Docs](https://cran.r-project.org/manuals.html)
<add> - [Quick-R](https://www.statmethods.net/management/functions.html)
<add> - [Advanced R: Functions](http://adv-r.had.co.nz/Functions.html)
<add> - [CRAN](https://cran.r-project.org/doc/manuals/r-release/R-lang.html#Functions) | 1 |
Text | Text | update redirects in readme | c9dc9707c875bb9b33405005b70e702b0fe69b69 | <ide><path>README.md
<ide> I wrote Redux while working on my React Europe talk called [“Hot Reloading wit
<ide>
<ide> ### Influences
<ide>
<del>Redux evolves the ideas of [Flux](https://facebook.github.io/flux), but avoids its complexity by taking cues from [Elm](https://github.com/evancz/elm-architecture-tutorial/).
<add>Redux evolves the ideas of [Flux](http://facebook.github.io/flux/), but avoids its complexity by taking cues from [Elm](https://github.com/evancz/elm-architecture-tutorial/).
<ide> Whether you have used them or not, Redux only takes a few minutes to get started with.
<ide>
<ide> ### Installation
<ide> To install the stable version:
<ide> npm install --save redux
<ide> ```
<ide>
<del>Most likely, you’ll also need [the React bindings](http://github.com/gaearon/react-redux) and [the developer tools](http://github.com/gaearon/redux-devtools).
<add>Most likely, you’ll also need [the React bindings](https://github.com/rackt/react-redux) and [the developer tools](https://github.com/gaearon/redux-devtools).
<ide>
<ide> ```
<ide> npm install --save react-redux
<ide> npm install --save-dev redux-devtools
<ide> ```
<ide>
<del>This assumes that you’re using [npm](http://npmjs.com/) package manager with a module bundler like [Webpack](http://webpack.github.io) or [Browserify](http://browserify.org/) to consume [CommonJS modules](http://webpack.github.io/docs/commonjs.html).
<add>This assumes that you’re using [npm](https://www.npmjs.com/) package manager with a module bundler like [Webpack](http://webpack.github.io) or [Browserify](http://browserify.org/) to consume [CommonJS modules](http://webpack.github.io/docs/commonjs.html).
<ide>
<del>If you don’t yet use [npm](http://npmjs.com/) or a modern module bundler, and would rather prefer a single-file [UMD](https://github.com/umdjs/umd) build that makes `Redux` available as a global object, you can grab a pre-built version from [cdnjs](https://cdnjs.com/libraries/redux). We *don’t* recommend this approach for any serious application, as most of the libraries complementary to Redux are only available on [npm](http://npmjs.com/).
<add>If you don’t yet use [npm](https://www.npmjs.com/) or a modern module bundler, and would rather prefer a single-file [UMD](https://github.com/umdjs/umd) build that makes `Redux` available as a global object, you can grab a pre-built version from [cdnjs](https://cdnjs.com/libraries/redux). We *don’t* recommend this approach for any serious application, as most of the libraries complementary to Redux are only available on [npm](https://www.npmjs.com/).
<ide>
<ide> ### The Gist
<ide>
<ide> If you’re new to the NPM ecosystem and have troubles getting a project up and
<ide>
<ide> ### Discussion
<ide>
<del>Join the [#redux](https://discord.gg/0ZcbPKXt5bZ6au5t) channel of the [Reactiflux](http://reactiflux.com) Discord community.
<add>Join the [#redux](https://discord.gg/0ZcbPKXt5bZ6au5t) channel of the [Reactiflux](http://www.reactiflux.com) Discord community.
<ide>
<ide> ### Thanks
<ide>
<ide> * [The Elm Architecture](https://github.com/evancz/elm-architecture-tutorial) for a great intro to modeling state updates with reducers;
<del>* [Turning the database inside-out](http://blog.confluent.io/2015/03/04/turning-the-database-inside-out-with-apache-samza/) for blowing my mind;
<del>* [Developing ClojureScript with Figwheel](http://www.youtube.com/watch?v=j-kj2qwJa_E) for convincing me that re-evaluation should “just work”;
<add>* [Turning the database inside-out](http://www.confluent.io/blog/turning-the-database-inside-out-with-apache-samza/) for blowing my mind;
<add>* [Developing ClojureScript with Figwheel](https://www.youtube.com/watch?v=j-kj2qwJa_E) for convincing me that re-evaluation should “just work”;
<ide> * [Webpack](https://github.com/webpack/docs/wiki/hot-module-replacement-with-webpack) for Hot Module Replacement;
<ide> * [Flummox](https://github.com/acdlite/flummox) for teaching me to approach Flux without boilerplate or singletons;
<ide> * [disto](https://github.com/threepointone/disto) for a proof of concept of hot reloadable Stores;
<ide> * [NuclearJS](https://github.com/optimizely/nuclear-js) for proving this architecture can be performant;
<ide> * [Om](https://github.com/omcljs/om) for popularizing the idea of a single state atom;
<del>* [Cycle](https://github.com/staltz/cycle) for showing how often a function is the best tool;
<add>* [Cycle](https://github.com/cyclejs/cycle-core) for showing how often a function is the best tool;
<ide> * [React](https://github.com/facebook/react) for the pragmatic innovation.
<ide>
<ide> Special thanks to [Jamie Paton](http://jdpaton.github.io) for handing over the `redux` NPM package name.
<ide> Every release, along with the migration instructions, is documented on the Githu
<ide> The work on Redux was [funded by the community](https://www.patreon.com/reactdx).
<ide> Meet some of the outstanding companies that made it possible:
<ide>
<del>* [Webflow](http://webflow.com/)
<add>* [Webflow](https://webflow.com/)
<ide> * [Chess iX](http://www.chess-ix.com/)
<ide>
<ide> [See the full list of Redux patrons.](PATRONS.md) | 1 |
Javascript | Javascript | remove disabled test | 03216760ec858f3257e4e009a40f7f9a62b41510 | <ide><path>test/ng/directive/inputSpec.js
<ide> describe('input', function() {
<ide> });
<ide>
<ide>
<del> xit('should require at least one item', function() {
<del> compileInput('<input type="text" ng-model="list" ng-list required />');
<del>
<del> changeInputValueTo(' , ');
<del> expect(inputElm).toBeInvalid();
<del> });
<del>
<del>
<ide> it('should convert empty string to an empty array', function() {
<ide> compileInput('<input type="text" ng-model="list" ng-list />');
<ide> | 1 |
Python | Python | add vectors to language.meta | 8e022942413f65a7b28ea45fa92ba687db76d1f9 | <ide><path>spacy/language.py
<ide> def meta(self):
<ide> self._meta.setdefault('email', '')
<ide> self._meta.setdefault('url', '')
<ide> self._meta.setdefault('license', '')
<add> self._meta['vectors'] = {'width': self.vocab.vectors_length,
<add> 'entries': len(self.vocab.vectors)}
<ide> self._meta['pipeline'] = self.pipe_names
<ide> return self._meta
<ide> | 1 |
Ruby | Ruby | silence another test that runs migrations | 9bb495e6ae4de6d8c22f5e0cf81fbd86d95362e2 | <ide><path>activerecord/test/cases/invertible_migration_test.rb
<ide> def change
<ide> end
<ide> end
<ide>
<add> setup do
<add> @verbose_was, ActiveRecord::Migration.verbose = ActiveRecord::Migration.verbose, false
<add> end
<add>
<ide> teardown do
<ide> %w[horses new_horses].each do |table|
<ide> if ActiveRecord::Base.connection.table_exists?(table)
<ide> ActiveRecord::Base.connection.drop_table(table)
<ide> end
<ide> end
<add> ActiveRecord::Migration.verbose = @verbose_was
<ide> end
<ide>
<ide> def test_no_reverse | 1 |
PHP | PHP | update docblock | bba47876777b036d76811720769c681d6102e067 | <ide><path>src/Illuminate/Auth/Middleware/Authenticate.php
<ide> protected function unauthenticated($request, array $guards)
<ide> * Get the path the user should be redirected to when they are not authenticated.
<ide> *
<ide> * @param \Illuminate\Http\Request $request
<del> * @return string
<add> * @return string|null
<ide> */
<ide> protected function redirectTo($request)
<ide> { | 1 |
Text | Text | add "合约协议" in the articles | beaa609da7c7e7eb9dbfd16494951d3bc13bddda | <ide><path>guide/chinese/blockchain/index.md
<ide> localeTitle: 块链
<ide> >
<ide> > \--Bettina Warburg 1
<ide>
<del>区块链通常与比特币和其他加密货币相关联,但它们不是一回事。比特币是第一个实施区块链概念的人。区块链的结构,不断增长的记录列表,可以应用于许多其他领域,如数字身份,供应链甚至[民主](https://www.democracy.earth/) 。
<add>区块链通常与比特币和其他加密货币相关联,但它们不是一回事。比特币是第一个实施区块链概念的人。区块链的结构,不断增长的记录列表,可以应用于许多其他领域,如数字身份、合约协议、供应链甚至[民主](https://www.democracy.earth/) 。
<ide>
<ide> 尽管区块链可以应用于大范围的问题,但它并不是解决所有问题的方法。该技术通常用于解决部件彼此不信任的问题。
<ide>
<ide> localeTitle: 块链
<ide> * [区块链如何改变金钱和业务| Don Tapscott(YouTube视频)](https://www.youtube.com/watch?v=Pl8OlkkwRpc)
<ide> * [比特币,以太坊和智能合约简介](https://github.com/WizardOfAus/WizardsEthereumWorkshop)
<ide> * [区块链学术论文](https://github.com/decrypto-org/blockchain-papers)
<del>* [区块链资源](https://github.com/BlockchainDevs/CryptocurrencyAwesome/blob/master/README.md)
<ide>\ No newline at end of file
<add>* [区块链资源](https://github.com/BlockchainDevs/CryptocurrencyAwesome/blob/master/README.md) | 1 |
Ruby | Ruby | remove reset! as a connection#checkout callback | 9c3c42d8ea489ff50fceebe0d37c86f9291fece5 | <ide><path>activerecord/lib/active_record/connection_adapters/abstract_adapter.rb
<ide> class AbstractAdapter
<ide> include QueryCache
<ide> include ActiveSupport::Callbacks
<ide> define_callbacks :checkout, :checkin
<del> checkout :reset!
<add>
<ide> @@row_even = true
<ide>
<ide> def initialize(connection, logger = nil) #:nodoc: | 1 |
PHP | PHP | accept closure for sleepmilliseconds | 035a0b2e2da129bac2756b1ee3ee10bb8bcda569 | <ide><path>src/Illuminate/Support/helpers.php
<ide> function preg_replace_array($pattern, array $replacements, $subject)
<ide> *
<ide> * @param int $times
<ide> * @param callable $callback
<del> * @param int $sleepMilliseconds
<add> * @param int|\Closure $sleepMilliseconds
<ide> * @param callable|null $when
<ide> * @return mixed
<ide> *
<ide> function retry($times, callable $callback, $sleepMilliseconds = 0, $when = null)
<ide> }
<ide>
<ide> if ($sleepMilliseconds) {
<del> usleep($sleepMilliseconds * 1000);
<add> usleep(value($sleepMilliseconds, $attempts) * 1000);
<ide> }
<ide>
<ide> goto beginning;
<ide><path>tests/Support/SupportHelpersTest.php
<ide> public function testRetry()
<ide> $this->assertEqualsWithDelta(0.1, microtime(true) - $startTime, 0.02);
<ide> }
<ide>
<add> public function testRetryWithPassingSleepCallback()
<add> {
<add> $startTime = microtime(true);
<add>
<add> $attempts = retry(3, function ($attempts) {
<add> if ($attempts > 2) {
<add> return $attempts;
<add> }
<add>
<add> throw new RuntimeException;
<add> }, function ($attempt) {
<add> return $attempt * 100;
<add> });
<add>
<add> // Make sure we made three attempts
<add> $this->assertEquals(3, $attempts);
<add>
<add> // Make sure we waited 300ms for the first two attempts
<add> $this->assertEqualsWithDelta(0.3, microtime(true) - $startTime, 0.02);
<add> }
<add>
<ide> public function testRetryWithPassingWhenCallback()
<ide> {
<ide> $startTime = microtime(true); | 2 |
Text | Text | add missing imports in events sample code | ad5ea5f9a5fa8b514227c3553e1a3cf361f2ddae | <ide><path>doc/api/events.md
<ide> require manual async tracking. Specifically, all events emitted by instances
<ide> of `events.EventEmitterAsyncResource` will run within its [async context][].
<ide>
<ide> ```mjs
<del>import { EventEmitterAsyncResource } from 'node:events';
<add>import { EventEmitterAsyncResource, EventEmitter } from 'node:events';
<ide> import { notStrictEqual, strictEqual } from 'node:assert';
<del>import { executionAsyncId } from 'node:async_hooks';
<add>import { executionAsyncId, triggerAsyncId } from 'node:async_hooks';
<ide>
<ide> // Async tracking tooling will identify this as 'Q'.
<ide> const ee1 = new EventEmitterAsyncResource({ name: 'Q' });
<ide> Promise.resolve().then(() => {
<ide> ```
<ide>
<ide> ```cjs
<del>const { EventEmitterAsyncResource } = require('node:events');
<add>const { EventEmitterAsyncResource, EventEmitter } = require('node:events');
<ide> const { notStrictEqual, strictEqual } = require('node:assert');
<del>const { executionAsyncId } = require('node:async_hooks');
<add>const { executionAsyncId, triggerAsyncId } = require('node:async_hooks');
<ide>
<ide> // Async tracking tooling will identify this as 'Q'.
<ide> const ee1 = new EventEmitterAsyncResource({ name: 'Q' }); | 1 |
Python | Python | adjust names and formatting | 2185d31907041f4e4c8856bfcb8635998648571b | <ide><path>spacy/tests/tokenizer/test_urls.py
<ide> "mailto:foo-bar@baz-co.com"
<ide> ]
<ide>
<add>
<ide> # Punctuation we want to check is split away before the URL
<ide> PREFIXES = [
<ide> "(", '"', ">"
<ide> ]
<ide>
<add>
<ide> # Punctuation we want to check is split away after the URL
<ide> SUFFIXES = [
<ide> '"', ":", ">"]
<ide>
<del>@pytest.mark.parametrize("text", URLS)
<del>def test_simple_url(tokenizer, text):
<del> tokens = tokenizer(text)
<del> assert tokens[0].orth_ == text
<add>
<add>@pytest.mark.parametrize("url", URLS)
<add>def test_tokenizer_handles_simple_url(tokenizer, url):
<add> tokens = tokenizer(url)
<ide> assert len(tokens) == 1
<add> assert tokens[0].text == url
<ide>
<ide>
<ide> @pytest.mark.parametrize("prefix", PREFIXES)
<ide> @pytest.mark.parametrize("url", URLS)
<del>def test_prefixed_url(tokenizer, prefix, url):
<add>def test_tokenizer_handles_prefixed_url(tokenizer, prefix, url):
<ide> tokens = tokenizer(prefix + url)
<ide> assert tokens[0].text == prefix
<ide> assert tokens[1].text == url
<ide> assert len(tokens) == 2
<del>
<add>
<add>
<ide> @pytest.mark.parametrize("suffix", SUFFIXES)
<ide> @pytest.mark.parametrize("url", URLS)
<del>def test_suffixed_url(tokenizer, url, suffix):
<add>def test_tokenizer_handles_suffixed_url(tokenizer, url, suffix):
<ide> tokens = tokenizer(url + suffix)
<ide> assert tokens[0].text == url
<ide> assert tokens[1].text == suffix
<ide> assert len(tokens) == 2
<del>
<add>
<add>
<ide> @pytest.mark.parametrize("prefix", PREFIXES)
<ide> @pytest.mark.parametrize("suffix", SUFFIXES)
<ide> @pytest.mark.parametrize("url", URLS)
<del>def test_surround_url(tokenizer, prefix, suffix, url):
<add>def test_tokenizer_handles_surround_url(tokenizer, prefix, suffix, url):
<ide> tokens = tokenizer(prefix + url + suffix)
<ide> assert tokens[0].text == prefix
<ide> assert tokens[1].text == url
<ide> assert tokens[2].text == suffix
<del> assert len(tokens) == 3
<del>
<add>
<add>
<ide> @pytest.mark.parametrize("prefix1", PREFIXES)
<ide> @pytest.mark.parametrize("prefix2", PREFIXES)
<ide> @pytest.mark.parametrize("url", URLS)
<del>def test_two_prefix_url(tokenizer, prefix1, prefix2, url):
<add>def test_tokenizer_handles_two_prefix_url(tokenizer, prefix1, prefix2, url):
<ide> tokens = tokenizer(prefix1 + prefix2 + url)
<ide> assert tokens[0].text == prefix1
<ide> assert tokens[1].text == prefix2
<ide> assert tokens[2].text == url
<ide> assert len(tokens) == 3
<del>
<add>
<add>
<ide> @pytest.mark.parametrize("suffix1", SUFFIXES)
<ide> @pytest.mark.parametrize("suffix2", SUFFIXES)
<ide> @pytest.mark.parametrize("url", URLS)
<del>def test_two_prefix_url(tokenizer, suffix1, suffix2, url):
<add>def test_tokenizer_handles_two_prefix_url(tokenizer, suffix1, suffix2, url):
<ide> tokens = tokenizer(url + suffix1 + suffix2)
<ide> assert tokens[0].text == url
<ide> assert tokens[1].text == suffix1 | 1 |
PHP | PHP | add array support to optional | 2564437750326edfe5b55da57ae295a770f4c9e5 | <ide><path>src/Illuminate/Support/Optional.php
<ide>
<ide> namespace Illuminate\Support;
<ide>
<del>class Optional
<add>use ArrayAccess;
<add>
<add>class Optional implements ArrayAccess
<ide> {
<ide> use Traits\Macroable {
<ide> __call as macroCall;
<ide> public function __call($method, $parameters)
<ide> return $this->value->{$method}(...$parameters);
<ide> }
<ide> }
<add>
<add> /**
<add> * Determine if an item exists at an offset.
<add> *
<add> * @param mixed $key
<add> * @return bool
<add> */
<add> public function offsetExists($key)
<add> {
<add> return Arr::accessible($this->value) && Arr::exists($this->value, $key);
<add> }
<add>
<add> /**
<add> * Get an item at a given offset.
<add> *
<add> * @param mixed $key
<add> * @return mixed
<add> */
<add> public function offsetGet($key)
<add> {
<add> return Arr::get($this->value, $key);
<add> }
<add>
<add> /**
<add> * Set the item at a given offset.
<add> *
<add> * @param mixed $key
<add> * @param mixed $value
<add> * @return void
<add> */
<add> public function offsetSet($key, $value)
<add> {
<add> if (Arr::accessible($this->value)) {
<add> $this->value[$key] = $value;
<add> }
<add> }
<add>
<add> /**
<add> * Unset the item at a given offset.
<add> *
<add> * @param string $key
<add> * @return void
<add> */
<add> public function offsetUnset($key)
<add> {
<add> if (Arr::accessible($this->value)) {
<add> unset($this->value[$key]);
<add> }
<add> }
<ide> }
<ide><path>tests/Support/SupportHelpersTest.php
<ide> public function something()
<ide> })->something());
<ide> }
<ide>
<add> public function testOptionalWithArray()
<add> {
<add> $this->assertNull(optional(null)['missing']);
<add>
<add> $this->assertEquals('here', optional(['present' => 'here'])['present']);
<add> }
<add>
<ide> public function testOptionalIsMacroable()
<ide> {
<ide> Optional::macro('present', function () { | 2 |
Java | Java | add null check after message conversion | da369aa8266c96d2fea635e39d5b5145b9e3d2ec | <ide><path>spring-messaging/src/main/java/org/springframework/messaging/converter/CompositeMessageConverter.java
<ide> /*
<del> * Copyright 2002-2013 the original author or authors.
<add> * Copyright 2002-2014 the original author or authors.
<ide> *
<ide> * Licensed under the Apache License, Version 2.0 (the "License");
<ide> * you may not use this file except in compliance with the License.
<ide> public Message<?> toMessage(Object payload, MessageHeaders headers) {
<ide> return null;
<ide> }
<ide>
<add> @Override
<add> public String toString() {
<add> return "CompositeMessageConverter[contentTypeResolver=" + this.contentTypeResolver +
<add> ", converters=" + this.converters + "]";
<add> }
<ide> }
<ide><path>spring-messaging/src/main/java/org/springframework/messaging/converter/DefaultContentTypeResolver.java
<ide> /*
<del> * Copyright 2002-2013 the original author or authors.
<add> * Copyright 2002-2014 the original author or authors.
<ide> *
<ide> * Licensed under the Apache License, Version 2.0 (the "License");
<ide> * you may not use this file except in compliance with the License.
<ide> public MimeType resolve(MessageHeaders headers) {
<ide> return (mimeType != null) ? mimeType : this.defaultMimeType;
<ide> }
<ide>
<add> @Override
<add> public String toString() {
<add> return "DefaultContentTypeResolver[" + "defaultMimeType=" + this.defaultMimeType + "]";
<add> }
<ide> }
<ide><path>spring-messaging/src/main/java/org/springframework/messaging/core/AbstractMessageSendingTemplate.java
<ide> /*
<del> * Copyright 2002-2013 the original author or authors.
<add> * Copyright 2002-2014 the original author or authors.
<ide> *
<ide> * Licensed under the Apache License, Version 2.0 (the "License");
<ide> * you may not use this file except in compliance with the License.
<ide> import org.springframework.messaging.Message;
<ide> import org.springframework.messaging.MessageHeaders;
<ide> import org.springframework.messaging.MessagingException;
<add>import org.springframework.messaging.converter.MessageConversionException;
<ide> import org.springframework.messaging.converter.MessageConverter;
<ide> import org.springframework.messaging.converter.SimpleMessageConverter;
<ide> import org.springframework.util.Assert;
<ide> public void convertAndSend(D destination, Object payload, Map<String, Object> he
<ide>
<ide> MessageHeaders messageHeaders = (headers != null) ? new MessageHeaders(headers) : null;
<ide> Message<?> message = this.converter.toMessage(payload, messageHeaders);
<add>
<add> if (message == null) {
<add> String payloadType = (payload != null) ? payload.getClass().getName() : null;
<add> throw new MessageConversionException("Unable to convert payload type '"
<add> + payloadType + "', Content-Type=" + messageHeaders.get(MessageHeaders.CONTENT_TYPE)
<add> + ", converter=" + this.converter, null);
<add> }
<add>
<ide> if (postProcessor != null) {
<ide> message = postProcessor.postProcessMessage(message);
<ide> }
<ide><path>spring-messaging/src/main/java/org/springframework/messaging/core/AbstractMessagingTemplate.java
<ide> /*
<del> * Copyright 2002-2013 the original author or authors.
<add> * Copyright 2002-2014 the original author or authors.
<ide> *
<ide> * Licensed under the Apache License, Version 2.0 (the "License");
<ide> * you may not use this file except in compliance with the License.
<ide>
<ide> import org.springframework.messaging.Message;
<ide> import org.springframework.messaging.MessageHeaders;
<add>import org.springframework.messaging.converter.MessageConversionException;
<ide>
<ide> /**
<ide> * An extension of {@link AbstractMessageSendingTemplate} that adds support for
<ide> public <T> T convertSendAndReceive(D destination, Object request, Map<String, Ob
<ide>
<ide> MessageHeaders messageHeaders = (headers != null) ? new MessageHeaders(headers) : null;
<ide> Message<?> requestMessage = getMessageConverter().toMessage(request, messageHeaders);
<add>
<add> if (requestMessage == null) {
<add> String payloadType = (request != null) ? request.getClass().getName() : null;
<add> throw new MessageConversionException("Unable to convert payload type '"
<add> + payloadType + "', Content-Type=" + messageHeaders.get(MessageHeaders.CONTENT_TYPE)
<add> + ", converter=" + getMessageConverter(), null);
<add> }
<add>
<ide> if (postProcessor != null) {
<ide> requestMessage = postProcessor.postProcessMessage(requestMessage);
<ide> }
<add>
<ide> Message<?> replyMessage = this.sendAndReceive(destination, requestMessage);
<ide> return (replyMessage != null) ? (T) getMessageConverter().fromMessage(replyMessage, targetClass) : null;
<ide> }
<ide><path>spring-messaging/src/test/java/org/springframework/messaging/core/MessageSendingTemplateTests.java
<ide>
<ide> package org.springframework.messaging.core;
<ide>
<add>import java.util.Arrays;
<ide> import java.util.Collections;
<add>import java.util.HashMap;
<ide> import java.util.Map;
<ide>
<ide> import org.junit.Before;
<ide> import org.junit.Test;
<ide>
<ide> import org.springframework.messaging.Message;
<add>import org.springframework.messaging.MessageHeaders;
<add>import org.springframework.messaging.converter.*;
<ide> import org.springframework.messaging.support.GenericMessage;
<add>import org.springframework.util.MimeType;
<add>import org.springframework.util.MimeTypeUtils;
<ide>
<ide> import static org.junit.Assert.*;
<ide>
<ide> public class MessageSendingTemplateTests {
<ide> public void setup() {
<ide> this.template = new TestMessageSendingTemplate();
<ide> this.postProcessor = new TestMessagePostProcessor();
<del> this.headers = Collections.<String, Object>singletonMap("key", "value");
<add> this.headers = new HashMap<>();
<add> this.headers.put("key", "value");
<ide> }
<ide>
<ide> @Test
<ide> public void convertAndSendPayloadWithPostProcessorToDestination() {
<ide> assertSame(this.template.message, this.postProcessor.getMessage());
<ide> }
<ide>
<add> @Test(expected = MessageConversionException.class)
<add> public void convertAndSendNoMatchingConverter() {
<add>
<add> MessageConverter converter = new CompositeMessageConverter(
<add> Arrays.asList(new MappingJackson2MessageConverter()), new DefaultContentTypeResolver());
<add> this.template.setMessageConverter(converter);
<add>
<add> this.headers.put(MessageHeaders.CONTENT_TYPE, MimeTypeUtils.APPLICATION_XML);
<add> this.template.convertAndSend("home", "payload", new MessageHeaders(this.headers));
<add> }
<add>
<ide>
<ide> private static class TestMessageSendingTemplate extends AbstractMessageSendingTemplate<String> {
<ide> | 5 |
Ruby | Ruby | remove outdated comment [ci skip] | 45f06a7ce88e4589bccc9679ff3b44ea8690af89 | <ide><path>activerecord/lib/active_record/connection_adapters/postgresql/schema_statements.rb
<ide> def indexes(table_name, name = nil)
<ide>
<ide> # Returns the list of all column definitions for a table.
<ide> def columns(table_name)
<del> # Limit, precision, and scale are all handled by the superclass.
<ide> column_definitions(table_name).map do |column_name, type, default, notnull, oid, fmod, collation|
<ide> oid = oid.to_i
<ide> fmod = fmod.to_i | 1 |
Text | Text | update serialization docs [ci skip] | 089f44cc564b30ff7cfe0ff8c68adacaa75c477f | <ide><path>website/docs/usage/101/_serialization.md
<ide> file or a byte string. This process is called serialization. spaCy comes with
<ide> > object to and from disk, but it's also used for distributed computing, e.g.
<ide> > with
<ide> > [PySpark](https://spark.apache.org/docs/0.9.0/python-programming-guide.html)
<del>> or [Dask](http://dask.pydata.org/en/latest/). When you unpickle an object,
<del>> you're agreeing to execute whatever code it contains. It's like calling
<del>> `eval()` on a string – so don't unpickle objects from untrusted sources.
<add>> or [Dask](https://dask.org). When you unpickle an object, you're agreeing to
<add>> execute whatever code it contains. It's like calling `eval()` on a string – so
<add>> don't unpickle objects from untrusted sources.
<ide>
<ide> All container classes, i.e. [`Language`](/api/language) (`nlp`),
<ide> [`Doc`](/api/doc), [`Vocab`](/api/vocab) and [`StringStore`](/api/stringstore) | 1 |
PHP | PHP | add removesubcommand in consoleoptionparser | 79dc624062582ac57ce4ca19f8945d8addc6b92e | <ide><path>lib/Cake/Console/ConsoleOptionParser.php
<ide> public function addSubcommand($name, $options = array()) {
<ide> return $this;
<ide> }
<ide>
<add>/**
<add> * Remove an subcommand from the option parser.
<add> *
<add> * @param string $name The subcommand name to remove.
<add> * @return ConsoleOptionParser this
<add> */
<add> public function removeSubcommand($name) {
<add> unset($this->_subcommands[$name]);
<add> return $this;
<add> }
<add>
<ide> /**
<ide> * Add multiple subcommands at once.
<ide> *
<ide><path>lib/Cake/Test/Case/Console/ConsoleOptionParserTest.php
<ide> public function testAddSubcommandObject() {
<ide> $this->assertEquals('test', $result['test']->name());
<ide> }
<ide>
<add>/**
<add> * test removeSubcommand with an object.
<add> *
<add> * @return void
<add> */
<add> public function testRemoveSubcommand() {
<add> $parser = new ConsoleOptionParser('test', false);
<add> $parser->addSubcommand(new ConsoleInputSubcommand('test'));
<add> $result = $parser->subcommands();
<add> $this->assertEquals(1, count($result));
<add> $parser->removeSubcommand('test');
<add> $result = $parser->subcommands();
<add> $this->assertEquals(0, count($result), 'Remove a subcommand does not work');
<add> }
<add>
<ide> /**
<ide> * test adding multiple subcommands
<ide> * | 2 |
Text | Text | add link to docs readme | 67ca7415e71a3e6389b883f746a1a15580deb892 | <ide><path>README.md
<ide> Contributing to Docker
<ide> [](https://ci.dockerproject.com/github.com/docker/docker)
<ide>
<ide> Want to hack on Docker? Awesome! There are instructions to get you
<del>started [here](CONTRIBUTING.md).
<add>started [here](CONTRIBUTING.md). If you'd like to contribute to the
<add>documentation, please take a look at this [README.md](https://github.com/docker/docker/blob/master/docs/README.md).
<ide>
<del>They are probably not perfect, please let us know if anything feels
<del>wrong or incomplete.
<add>These instructions are probably not perfect, please let us know if anything feels wrong or incomplete.
<ide>
<ide> ### Legal
<ide> | 1 |
Python | Python | fix text_to_word_sequence with unicode text | 71bfb00788ec6e899abc8d2de1a67a281879bccd | <ide><path>keras/preprocessing/text.py
<ide> def text_to_word_sequence(text,
<ide> """
<ide> if lower:
<ide> text = text.lower()
<del> text = text.translate(maketrans(filters, split * len(filters)))
<add>
<add> if sys.version_info < (3,) and isinstance(text, unicode):
<add> translate_map = dict((ord(c), unicode(split)) for c in filters)
<add> else:
<add> translate_map = maketrans(filters, split * len(filters))
<add>
<add> text = text.translate(translate_map)
<ide> seq = text.split(split)
<ide> return [i for i in seq if i]
<ide>
<ide><path>tests/keras/preprocessing/text_test.py
<add># -*- coding: utf-8 -*-
<add>
<ide> import numpy as np
<ide> import pytest
<ide>
<del>from keras.preprocessing.text import Tokenizer, one_hot, hashing_trick
<add>from keras.preprocessing.text import Tokenizer, one_hot, hashing_trick, text_to_word_sequence
<ide>
<ide>
<ide> def test_one_hot():
<ide> def test_tokenizer():
<ide> matrix = tokenizer.texts_to_matrix(texts, mode)
<ide>
<ide>
<add>def test_text_to_word_sequence():
<add> text = 'hello! ? world!'
<add> assert text_to_word_sequence(text) == ['hello', 'world']
<add>
<add>
<add>def test_text_to_word_sequence_unicode():
<add> text = u'ali! veli? kırk dokuz elli'
<add> assert text_to_word_sequence(text) == [u'ali', u'veli', u'kırk', u'dokuz', u'elli']
<add>
<add>
<add>def test_tokenizer_unicode():
<add> texts = [u'ali veli kırk dokuz elli', u'ali veli kırk dokuz elli veli kırk dokuz']
<add> tokenizer = Tokenizer(num_words=5)
<add> tokenizer.fit_on_texts(texts)
<add>
<add> assert len(tokenizer.word_counts) == 5
<add>
<add>
<ide> if __name__ == '__main__':
<ide> pytest.main([__file__]) | 2 |
Text | Text | switch installation to a tab when hash is present | 4c8c5debac5074427d5e6ce19896d4062e078a83 | <ide><path>docs/docs/installation.md
<ide> To load a specific version of `react` and `react-dom`, replace `15` with the ver
<ide> If you use Bower, React is available via the `react` package.
<ide>
<ide> <script>
<add>/**
<add> * The code below is based on a snippet from React Native Getting Started page.
<add> */
<add>
<ide> // Convert <div>...<span><block /></span>...</div>
<ide> // Into <div>...<block />...</div>
<ide> var blocks = document.getElementsByTagName('block');
<ide> function display(type, value) {
<ide> container.className = 'display-' + type + '-' + value + ' ' +
<ide> container.className.replace(RegExp('display-' + type + '-[a-z]+ ?'), '');
<ide> }
<add>
<add>// If we are coming to the page with a hash in it (i.e. from a search, for example), try to get
<add>// us as close as possible to the correct platform and dev os using the hashtag and block walk up.
<add>var foundHash = false;
<add>if (window.location.hash !== '' && window.location.hash !== 'content') { // content is default
<add> // Hash links are added a bit later so we wait for them.
<add> window.addEventListener('DOMContentLoaded', selectTabForHashLink);
<add>}
<add>
<add>function selectTabForHashLink() {
<add> var hashLinks = document.querySelectorAll('a.hash-link');
<add> for (var i = 0; i < hashLinks.length && !foundHash; ++i) {
<add> if (hashLinks[i].hash === window.location.hash) {
<add> var parent = hashLinks[i].parentElement;
<add> while (parent) {
<add> if (parent.tagName === 'BLOCK') {
<add> var target = null;
<add> if (parent.className.indexOf('fiddle') > -1) {
<add> target = 'fiddle';
<add> } else if (parent.className.indexOf('newapp') > -1) {
<add> target = 'newapp';
<add> } else if (parent.className.indexOf('existingapp') > -1) {
<add> target = 'existingapp';
<add> } else {
<add> break; // assume we don't have anything.
<add> }
<add> display('target', target);
<add> foundHash = true;
<add> break;
<add> }
<add> parent = parent.parentElement;
<add> }
<add> }
<add> }
<add>}
<ide> </script>
<ide>\ No newline at end of file | 1 |
Text | Text | add tests reflecting | 9cf283aab950fb58e75e95c04ae0b7f80d6387f9 | <ide><path>curriculum/challenges/english/02-javascript-algorithms-and-data-structures/basic-data-structures/check-if-an-object-has-a-property.english.md
<ide> We've created an object, <code>users</code>, with some users in it and a functio
<ide>
<ide> ```yml
<ide> tests:
<del> - text: The <code>users</code> object only contains the keys <code>Alan</code>, <code>Jeff</code>, <code>Sarah</code>, and <code>Ryan</code>
<del> testString: assert('Alan' in users && 'Jeff' in users && 'Sarah' in users && 'Ryan' in users && Object.keys(users).length === 4, 'The <code>users</code> object only contains the keys <code>Alan</code>, <code>Jeff</code>, <code>Sarah</code>, and <code>Ryan</code>');
<del> - text: The function <code>isEveryoneHere</code> returns <code>true</code> if <code>Alan</code>, <code>Jeff</code>, <code>Sarah</code>, and <code>Ryan</code> are properties on the <code>users</code> object
<del> testString: assert(isEveryoneHere(users) === true, 'The function <code>isEveryoneHere</code> returns <code>true</code> if <code>Alan</code>, <code>Jeff</code>, <code>Sarah</code>, and <code>Ryan</code> are properties on the <code>users</code> object');
<del> - text: The function <code>isEveryoneHere</code> returns <code>false</code> if <code>Alan</code>, <code>Jeff</code>, <code>Sarah</code>, and <code>Ryan</code> are not properties on the <code>users</code> object
<del> testString: assert((function() { delete users.Alan; delete users.Jeff; delete users.Sarah; delete users.Ryan; return isEveryoneHere(users) })() === false, 'The function <code>isEveryoneHere</code> returns <code>false</code> if <code>Alan</code>, <code>Jeff</code>, <code>Sarah</code>, and <code>Ryan</code> are not properties on the <code>users</code> object');
<del>
<add> - text: 'The <code>users</code> object only contains the keys <code>Alan</code>, <code>Jeff</code>, <code>Sarah</code>, and <code>Ryan</code>'
<add> testString: 'assert("Alan" in users && "Jeff" in users && "Sarah" in users && "Ryan" in users && Object.keys(users).length === 4, "The <code>users</code> object only contains the keys <code>Alan</code>, <code>Jeff</code>, <code>Sarah</code>, and <code>Ryan</code>");'
<add> - text: 'The function <code>isEveryoneHere</code> returns <code>true</code> if <code>Alan</code>, <code>Jeff</code>, <code>Sarah</code>, and <code>Ryan</code> are properties on the <code>users</code> object'
<add> testString: 'assert(isEveryoneHere(users) === true, "The function <code>isEveryoneHere</code> returns <code>true</code> if <code>Alan</code>, <code>Jeff</code>, <code>Sarah</code>, and <code>Ryan</code> are properties on the <code>users</code> object");'
<add> - text: 'The function <code>isEveryoneHere</code> returns <code>false</code> if <code>Alan</code> is not a property on the <code>users</code> object'
<add> testString: 'assert((function() { delete users.Alan; return isEveryoneHere(users) })() === false, "The function <code>isEveryoneHere</code> returns <code>false</code> if <code>Alan</code> is not a property on the <code>users</code> object");'
<add> - text: 'The function <code>isEveryoneHere</code> returns <code>false</code> if <code>Jeff</code> is not a property on the <code>users</code> object'
<add> testString: 'assert((function() { delete users.Jeff; return isEveryoneHere(users) })() === false, "The function <code>isEveryoneHere</code> returns <code>false</code> if <code>Jeff</code> is not a property on the <code>users</code> object");'
<add> - text: 'The function <code>isEveryoneHere</code> returns <code>false</code> if <code>Sarah</code> is not a property on the <code>users</code> object'
<add> testString: 'assert((function() { delete users.Sarah; return isEveryoneHere(users) })() === false, "The function <code>isEveryoneHere</code> returns <code>false</code> if <code>Sarah</code> is not a property on the <code>users</code> object");'
<add> - text: 'The function <code>isEveryoneHere</code> returns <code>false</code> if <code>Ryan</code> is not a property on the <code>users</code> object'
<add> testString: 'assert((function() { delete users.Ryan; return isEveryoneHere(users) })() === false, "The function <code>isEveryoneHere</code> returns <code>false</code> if <code>Ryan</code> is not a property on the <code>users</code> object");'
<ide> ```
<ide>
<ide> </section> | 1 |
Text | Text | update commands.rb content in initialization guide | 3d449dee5ed60b4d6c688c919c527dd70f528043 | <ide><path>guides/source/initialization.md
<ide> A standard Rails application depends on several gems, specifically:
<ide>
<ide> ### `rails/commands.rb`
<ide>
<del>Once `config/boot.rb` has finished, the next file that is required is `rails/commands` which will execute a command based on the arguments passed in. In this case, the `ARGV` array simply contains `server` which is extracted into the `command` variable using these lines:
<add>Once `config/boot.rb` has finished, the next file that is required is `rails/commands`, which helps in expanding aliases. In the current case, the `ARGV` array simply contains `server` which will be passed over to `rails/commands_tasks`.
<ide>
<ide> ```ruby
<ide> ARGV << '--help' if ARGV.empty?
<ide> aliases = {
<ide>
<ide> command = ARGV.shift
<ide> command = aliases[command] || command
<add>
<add>require 'rails/commands/commands_tasks'
<add>
<add>Rails::CommandsTasks.new(ARGV).run_command!(command)
<ide> ```
<ide>
<ide> TIP: As you can see, an empty ARGV list will make Rails show the help
<ide> snippet.
<ide>
<del>If we used `s` rather than `server`, Rails will use the `aliases` defined in the file and match them to their respective commands. With the `server` command, Rails will run this code:
<add>If we had used `s` rather than `server`, Rails would have used the `aliases` defined here to find the matching command.
<add>
<add>With the `server` command, Rails will run this code:
<ide>
<ide> ```ruby
<ide> when 'server' | 1 |
Python | Python | fix broken static check on main | c0fbe3ad12eae4dc408a0ef103f2a359762f73e3 | <ide><path>airflow/utils/process_utils.py
<ide> def execute_interactive(cmd: List[str], **kwargs):
<ide> try:
<ide> # use os.setsid() make it run in a new process group, or bash job control will not be enabled
<ide> with subprocess.Popen(
<del> cmd, stdin=secondary_fd, stdout=secondary_fd, stderr=secondary_fd, universal_newlines=True, **kwargs
<add> cmd,
<add> stdin=secondary_fd,
<add> stdout=secondary_fd,
<add> stderr=secondary_fd,
<add> universal_newlines=True,
<add> **kwargs,
<ide> ) as proc:
<ide> while proc.poll() is None:
<ide> readable_fbs, _, _ = select.select([sys.stdin, primary_fd], [], []) | 1 |
Go | Go | remove verbose logging for non errors | 055f1a1f8181d51d3386b4270f810efd59d51d30 | <ide><path>pkg/proxy/tcp_proxy.go
<ide> func NewTCPProxy(frontendAddr, backendAddr *net.TCPAddr) (*TCPProxy, error) {
<ide> func (proxy *TCPProxy) clientLoop(client *net.TCPConn, quit chan bool) {
<ide> backend, err := net.DialTCP("tcp", nil, proxy.backendAddr)
<ide> if err != nil {
<del> log.Printf("Can't forward traffic to backend tcp/%v: %v\n", proxy.backendAddr, err.Error())
<add> log.Printf("Can't forward traffic to backend tcp/%v: %s\n", proxy.backendAddr, err)
<ide> client.Close()
<ide> return
<ide> }
<ide> func (proxy *TCPProxy) clientLoop(client *net.TCPConn, quit chan bool) {
<ide> event <- written
<ide> }
<ide>
<del> log.Printf("Forwarding traffic between tcp/%v and tcp/%v", client.RemoteAddr(), backend.RemoteAddr())
<ide> go broker(client, backend)
<ide> go broker(backend, client)
<ide>
<ide> func (proxy *TCPProxy) clientLoop(client *net.TCPConn, quit chan bool) {
<ide> for ; i < 2; i++ {
<ide> transferred += <-event
<ide> }
<del> goto done
<add> return
<ide> }
<ide> }
<ide> client.Close()
<ide> backend.Close()
<del>done:
<del> log.Printf("%v bytes transferred between tcp/%v and tcp/%v", transferred, client.RemoteAddr(), backend.RemoteAddr())
<ide> }
<ide>
<ide> func (proxy *TCPProxy) Run() {
<ide> quit := make(chan bool)
<ide> defer close(quit)
<del> log.Printf("Starting proxy on tcp/%v for tcp/%v", proxy.frontendAddr, proxy.backendAddr)
<ide> for {
<ide> client, err := proxy.listener.Accept()
<ide> if err != nil {
<del> log.Printf("Stopping proxy on tcp/%v for tcp/%v (%v)", proxy.frontendAddr, proxy.backendAddr, err.Error())
<add> log.Printf("Stopping proxy on tcp/%v for tcp/%v (%s)", proxy.frontendAddr, proxy.backendAddr, err)
<ide> return
<ide> }
<ide> go proxy.clientLoop(client.(*net.TCPConn), quit)
<ide><path>pkg/proxy/udp_proxy.go
<ide> func (proxy *UDPProxy) replyLoop(proxyConn *net.UDPConn, clientAddr *net.UDPAddr
<ide> proxy.connTrackLock.Lock()
<ide> delete(proxy.connTrackTable, *clientKey)
<ide> proxy.connTrackLock.Unlock()
<del> log.Printf("Done proxying between udp/%v and udp/%v", clientAddr.String(), proxy.backendAddr.String())
<ide> proxyConn.Close()
<ide> }()
<ide>
<ide> func (proxy *UDPProxy) replyLoop(proxyConn *net.UDPConn, clientAddr *net.UDPAddr
<ide> return
<ide> }
<ide> i += written
<del> log.Printf("Forwarded %v/%v bytes to udp/%v", i, read, clientAddr.String())
<ide> }
<ide> }
<ide> }
<ide>
<ide> func (proxy *UDPProxy) Run() {
<ide> readBuf := make([]byte, UDPBufSize)
<del> log.Printf("Starting proxy on udp/%v for udp/%v", proxy.frontendAddr, proxy.backendAddr)
<ide> for {
<ide> read, from, err := proxy.listener.ReadFromUDP(readBuf)
<ide> if err != nil {
<ide> // NOTE: Apparently ReadFrom doesn't return
<ide> // ECONNREFUSED like Read do (see comment in
<ide> // UDPProxy.replyLoop)
<del> if isClosedError(err) {
<del> log.Printf("Stopping proxy on udp/%v for udp/%v (socket was closed)", proxy.frontendAddr, proxy.backendAddr)
<del> } else {
<del> log.Printf("Stopping proxy on udp/%v for udp/%v (%v)", proxy.frontendAddr, proxy.backendAddr, err.Error())
<add> if !isClosedError(err) {
<add> log.Printf("Stopping proxy on udp/%v for udp/%v (%s)", proxy.frontendAddr, proxy.backendAddr, err)
<ide> }
<ide> break
<ide> }
<ide> func (proxy *UDPProxy) Run() {
<ide> if !hit {
<ide> proxyConn, err = net.DialUDP("udp", nil, proxy.backendAddr)
<ide> if err != nil {
<del> log.Printf("Can't proxy a datagram to udp/%s: %v\n", proxy.backendAddr.String(), err)
<add> log.Printf("Can't proxy a datagram to udp/%s: %s\n", proxy.backendAddr, err)
<ide> continue
<ide> }
<ide> proxy.connTrackTable[*fromKey] = proxyConn
<ide> func (proxy *UDPProxy) Run() {
<ide> for i := 0; i != read; {
<ide> written, err := proxyConn.Write(readBuf[i:read])
<ide> if err != nil {
<del> log.Printf("Can't proxy a datagram to udp/%s: %v\n", proxy.backendAddr.String(), err)
<add> log.Printf("Can't proxy a datagram to udp/%s: %s\n", proxy.backendAddr, err)
<ide> break
<ide> }
<ide> i += written
<del> log.Printf("Forwarded %v/%v bytes to udp/%v", i, read, proxy.backendAddr.String())
<ide> }
<ide> }
<ide> } | 2 |
PHP | PHP | add tests for belongstomany and hasmany saving | 353bd1a526ca1513fe41a1c4c0d3ccfa04ffefc5 | <ide><path>Cake/Test/TestCase/ORM/Association/BelongsToManyTest.php
<ide> public function testSaveWithReplaceReturnFalse() {
<ide> $this->assertFalse($assoc->save($entity, $options));
<ide> }
<ide>
<add>/**
<add> * Test that save() ignores non entity values.
<add> *
<add> * @return void
<add> */
<add> public function testSaveOnlyEntities() {
<add> $connection = \Cake\Database\ConnectionManager::get('test');
<add> $mock = $this->getMock(
<add> 'Cake\ORM\Table',
<add> ['save', 'schema'],
<add> [['table' => 'tags', 'connection' => $connection]]
<add> );
<add> $mock->primaryKey('id');
<add>
<add> $config = [
<add> 'sourceTable' => $this->article,
<add> 'targetTable' => $mock,
<add> 'saveStrategy' => BelongsToMany::SAVE_APPEND,
<add> ];
<add>
<add> $entity = new Entity([
<add> 'id' => 1,
<add> 'title' => 'First Post',
<add> 'tags' => [
<add> ['tag' => 'nope'],
<add> new Entity(['tag' => 'cakephp']),
<add> ]
<add> ]);
<add>
<add> $mock->expects($this->once())
<add> ->method('save')
<add> ->with($entity->tags[1]);
<add>
<add> $association = new BelongsToMany('Tags', $config);
<add> $association->save($entity);
<add> }
<add>
<ide> }
<ide><path>Cake/Test/TestCase/ORM/Association/HasManyTest.php
<ide> public function testCascadeDeleteCallbacks() {
<ide> $this->assertTrue($association->cascadeDelete($entity));
<ide> }
<ide>
<add>/**
<add> * Test that save() ignores non entity values.
<add> *
<add> * @return void
<add> */
<add> public function testSaveOnlyEntities() {
<add> $mock = $this->getMock('Cake\ORM\Table', [], [], '', false);
<add> $config = [
<add> 'sourceTable' => $this->author,
<add> 'targetTable' => $mock,
<add> ];
<add>
<add> $entity = new Entity([
<add> 'username' => 'Mark',
<add> 'email' => 'mark@example.com',
<add> 'articles' => [
<add> ['title' => 'First Post'],
<add> new Entity(['title' => 'Second Post']),
<add> ]
<add> ]);
<add>
<add> $mock->expects($this->once())
<add> ->method('save')
<add> ->with($entity->articles[1]);
<add>
<add> $association = new HasMany('Articles', $config);
<add> $association->save($entity);
<add> }
<ide> } | 2 |
Ruby | Ruby | remove build flag from ant dep | 355361fa3f6a4c18fe582a45148cc9dcd5bc50b9 | <ide><path>Library/Homebrew/dependency_collector.rb
<ide> def autotools_dep(spec, tags)
<ide>
<ide> def ant_dep(spec, tags)
<ide> if MacOS.version >= :mavericks
<del> tags << :build
<ide> Dependency.new(spec.to_s, tags)
<ide> end
<ide> end | 1 |
Text | Text | change the printf function of the hello world code | a9dfb97bbde8de26ec150befdb1e1082641e59af | <ide><path>guide/english/c/index.md
<ide> Now that you have that background, let's start with our 'Hello, World' program.
<ide>
<ide> int main(void)
<ide> {
<del> printf("hello, world\n");
<add> printf("Hello, World!\n");
<ide> return 0;
<ide> }
<ide> ``` | 1 |
Text | Text | define a generator before referring to it | cb389fc6d002b1ec9102f6d39d6de195dcf90c34 | <ide><path>guides/source/getting_started.md
<ide> If it says something like "Rails 3.2.9", you are ready to continue.
<ide>
<ide> ### Creating the Blog Application
<ide>
<del>Rails comes with a number of generators that are designed to make your development life easier. One of these is the new application generator, which will provide you with the foundation of a Rails application so that you don't have to write it yourself.
<add>Rails comes with a number of scripts called generators that are designed to make your development life easier by creating everything that's necessary to start working on a particular task. One of these is the new application generator, which will provide you with the foundation of a fresh Rails application so that you don't have to write it yourself.
<ide>
<ide> To use this generator, open a terminal, navigate to a directory where you have rights to create files, and type:
<ide> | 1 |
Python | Python | update quadratic equations solver | d2f7982a4ee105ca980b2446ed8fc2e32139dd7d | <ide><path>maths/quadratic_equations_complex_numbers.py
<del>from math import sqrt
<add>from cmath import sqrt
<ide> from typing import Tuple
<ide>
<ide>
<del>def QuadraticEquation(a: int, b: int, c: int) -> Tuple[str, str]:
<add>def quadratic_roots(a: int, b: int, c: int) -> Tuple[complex, complex]:
<ide> """
<ide> Given the numerical coefficients a, b and c,
<del> prints the solutions for a quadratic equation, for a*x*x + b*x + c.
<add> calculates the roots for any quadratic equation of the form ax^2 + bx + c
<ide>
<del> >>> QuadraticEquation(a=1, b=3, c=-4)
<del> ('1.0', '-4.0')
<del> >>> QuadraticEquation(5, 6, 1)
<del> ('-0.2', '-1.0')
<add> >>> quadratic_roots(a=1, b=3, c=-4)
<add> (1.0, -4.0)
<add> >>> quadratic_roots(5, 6, 1)
<add> (-0.2, -1.0)
<add> >>> quadratic_roots(1, -6, 25)
<add> ((3+4j), (3-4j))
<ide> """
<add>
<ide> if a == 0:
<del> raise ValueError("Coefficient 'a' must not be zero for quadratic equations.")
<add> raise ValueError("Coefficient 'a' must not be zero.")
<ide> delta = b * b - 4 * a * c
<del> if delta >= 0:
<del> return str((-b + sqrt(delta)) / (2 * a)), str((-b - sqrt(delta)) / (2 * a))
<del> """
<del> Treats cases of Complexes Solutions(i = imaginary unit)
<del> Ex.: a = 5, b = 2, c = 1
<del> Solution1 = (- 2 + 4.0 *i)/2 and Solution2 = (- 2 + 4.0 *i)/ 10
<del> """
<del> snd = sqrt(-delta)
<del> if b == 0:
<del> return f"({snd} * i) / 2", f"({snd} * i) / {2 * a}"
<del> b = -abs(b)
<del> return f"({b}+{snd} * i) / 2", f"({b}+{snd} * i) / {2 * a}"
<add>
<add> root_1 = (-b + sqrt(delta)) / (2 * a)
<add> root_2 = (-b - sqrt(delta)) / (2 * a)
<add>
<add> return (
<add> root_1.real if not root_1.imag else root_1,
<add> root_2.real if not root_2.imag else root_2,
<add> )
<ide>
<ide>
<ide> def main():
<del> solutions = QuadraticEquation(a=5, b=6, c=1)
<del> print("The equation solutions are: {} and {}".format(*solutions))
<del> # The equation solutions are: -0.2 and -1.0
<add> solutions = quadratic_roots(a=5, b=6, c=1)
<add> print("The solutions are: {} and {}".format(*solutions))
<ide>
<ide>
<ide> if __name__ == "__main__": | 1 |
Go | Go | remove redundant init() | 31d34681463a275f0bcc0dd43461d3e11cc6b61d | <ide><path>libnetwork/service_windows.go
<ide> type policyLists struct {
<ide> elb *hcsshim.PolicyList
<ide> }
<ide>
<del>var lbPolicylistMap map[*loadBalancer]*policyLists
<del>
<del>func init() {
<del> lbPolicylistMap = make(map[*loadBalancer]*policyLists)
<del>}
<add>var lbPolicylistMap = make(map[*loadBalancer]*policyLists)
<ide>
<ide> func (n *network) addLBBackend(ip net.IP, lb *loadBalancer) {
<ide> if len(lb.vip) == 0 { | 1 |
Javascript | Javascript | update example to use a module | b9faec0673ce8b26bfb1ba6073a7d8379b57a9eb | <ide><path>src/ng/directive/form.js
<ide> function FormController(element, attrs, $scope, $animate) {
<ide> * </pre>
<ide> *
<ide> * @example
<del> <example deps="angular-animate.js" animations="true" fixBase="true">
<add> <example deps="angular-animate.js" animations="true" fixBase="true" module="formExample">
<ide> <file name="index.html">
<ide> <script>
<del> function Ctrl($scope) {
<del> $scope.userType = 'guest';
<del> }
<add> angular.module('formExample', [])
<add> .controller('FormController', ['$scope', function($scope) {
<add> $scope.userType = 'guest';
<add> }]);
<ide> </script>
<ide> <style>
<ide> .my-form {
<ide> function FormController(element, attrs, $scope, $animate) {
<ide> background: red;
<ide> }
<ide> </style>
<del> <form name="myForm" ng-controller="Ctrl" class="my-form">
<add> <form name="myForm" ng-controller="FormController" class="my-form">
<ide> userType: <input name="input" ng-model="userType" required>
<ide> <span class="error" ng-show="myForm.input.$error.required">Required!</span><br>
<ide> <tt>userType = {{userType}}</tt><br> | 1 |
PHP | PHP | add new validatewithbag macro to request | fdeb204e1cd3374a71d669fd2ff08323d88c1eef | <ide><path>src/Illuminate/Foundation/Providers/FoundationServiceProvider.php
<ide> use Illuminate\Http\Request;
<ide> use Illuminate\Support\AggregateServiceProvider;
<ide> use Illuminate\Support\Facades\URL;
<add>use Illuminate\Validation\ValidationException;
<ide>
<ide> class FoundationServiceProvider extends AggregateServiceProvider
<ide> {
<ide> public function registerRequestValidation()
<ide> Request::macro('validate', function (array $rules, ...$params) {
<ide> return validator()->validate($this->all(), $rules, ...$params);
<ide> });
<add>
<add> Request::macro('validateWithBag', function (string $errorBag, array $rules, ...$params) {
<add> try {
<add> return $this->validate($rules, ...$params);
<add> } catch (ValidationException $e) {
<add> $e->errorBag = $errorBag;
<add>
<add> throw $e;
<add> }
<add> });
<ide> }
<ide>
<ide> /**
<ide><path>tests/Integration/Validation/RequestValidationTest.php
<ide> public function testValidateMacroWhenItFails()
<ide>
<ide> $request->validate(['name' => 'string']);
<ide> }
<add>
<add> public function testValidateWithBagMacro()
<add> {
<add> $request = Request::create('/', 'GET', ['name' => 'Taylor']);
<add>
<add> $validated = $request->validateWithBag('some_bag', ['name' => 'string']);
<add>
<add> $this->assertSame(['name' => 'Taylor'], $validated);
<add> }
<add>
<add> public function testValidateWithBagMacroWhenItFails()
<add> {
<add> $request = Request::create('/', 'GET', ['name' => null]);
<add>
<add> try {
<add> $request->validateWithBag('some_bag', ['name' => 'string']);
<add> } catch (ValidationException $validationException) {
<add> $this->assertEquals('some_bag', $validationException->errorBag);
<add> }
<add> }
<ide> } | 2 |
Python | Python | add new and fix existing pricing tests, | 389fff015fc8f0cb36261048689bed76d8089ffd | <ide><path>test/test_pricing.py
<ide> def test_get_pricing_success(self):
<ide>
<ide> def test_get_pricing_invalid_file_path(self):
<ide> try:
<del> get_pricing(driver_type='compute', driver_name='bar',
<del> pricing_file_path='inexistent.json')
<del> except Exception:
<add> libcloud.pricing.get_pricing(driver_type='compute', driver_name='bar',
<add> pricing_file_path='inexistent.json')
<add> except IOError:
<ide> pass
<ide> else:
<ide> self.fail('Invalid pricing file path provided, but an exception was not'
<ide> ' thrown')
<ide>
<ide> def test_get_pricing_invalid_driver_type(self):
<ide> try:
<del> get_pricing(driver_type='invalid_type', driver_name='bar',
<del> pricing_file_path='inexistent.json')
<del> except Exception:
<add> libcloud.pricing.get_pricing(driver_type='invalid_type', driver_name='bar',
<add> pricing_file_path='inexistent.json')
<add> except AttributeError:
<ide> pass
<ide> else:
<ide> self.fail('Invalid driver_type provided, but an exception was not'
<ide> ' thrown')
<ide>
<add> def test_get_pricing_not_in_cache(self):
<add> try:
<add> libcloud.pricing.get_pricing(driver_type='compute', driver_name='inexistent',
<add> pricing_file_path='test/pricing_test.json')
<add> except KeyError:
<add> pass
<add> else:
<add> self.fail('Invalid driver provided, but an exception was not'
<add> ' thrown')
<add>
<ide> def test_invalid_pricing_cache(self):
<ide> libcloud.pricing.PRICING_DATA['compute']['foo'] = { 2: 2 }
<ide> self.assertTrue('foo' in libcloud.pricing.PRICING_DATA['compute']) | 1 |
Javascript | Javascript | improve code in test-https-strict | fd115862f413899996130c00a6109cd5448d760e | <ide><path>test/parallel/test-https-strict.js
<ide> function read(fname) {
<ide> }
<ide>
<ide> // key1 is signed by ca1.
<del>var key1 = read('agent1-key.pem');
<del>var cert1 = read('agent1-cert.pem');
<add>const key1 = read('agent1-key.pem');
<add>const cert1 = read('agent1-cert.pem');
<ide>
<ide> // key2 has a self signed cert
<del>var key2 = read('agent2-key.pem');
<del>var cert2 = read('agent2-cert.pem');
<add>const key2 = read('agent2-key.pem');
<add>const cert2 = read('agent2-cert.pem');
<ide>
<ide> // key3 is signed by ca2.
<del>var key3 = read('agent3-key.pem');
<del>var cert3 = read('agent3-cert.pem');
<add>const key3 = read('agent3-key.pem');
<add>const cert3 = read('agent3-cert.pem');
<ide>
<del>var ca1 = read('ca1-cert.pem');
<del>var ca2 = read('ca2-cert.pem');
<add>const ca1 = read('ca1-cert.pem');
<add>const ca2 = read('ca2-cert.pem');
<ide>
<ide> // different agents to use different CA lists.
<ide> // this api is beyond bad.
<del>var agent0 = new https.Agent();
<del>var agent1 = new https.Agent({ ca: [ca1] });
<del>var agent2 = new https.Agent({ ca: [ca2] });
<del>var agent3 = new https.Agent({ ca: [ca1, ca2] });
<add>const agent0 = new https.Agent();
<add>const agent1 = new https.Agent({ ca: [ca1] });
<add>const agent2 = new https.Agent({ ca: [ca2] });
<add>const agent3 = new https.Agent({ ca: [ca1, ca2] });
<ide>
<del>var options1 = {
<add>const options1 = {
<ide> key: key1,
<ide> cert: cert1
<ide> };
<ide>
<del>var options2 = {
<add>const options2 = {
<ide> key: key2,
<ide> cert: cert2
<ide> };
<ide>
<del>var options3 = {
<add>const options3 = {
<ide> key: key3,
<ide> cert: cert3
<ide> };
<ide>
<del>var server1 = server(options1);
<del>var server2 = server(options2);
<del>var server3 = server(options3);
<add>const server1 = server(options1);
<add>const server2 = server(options2);
<add>const server3 = server(options3);
<ide>
<del>var listenWait = 0;
<add>let listenWait = 0;
<ide>
<ide> server1.listen(0, listening());
<ide> server2.listen(0, listening());
<ide> server3.listen(0, listening());
<ide>
<del>var responseErrors = {};
<del>var expectResponseCount = 0;
<del>var responseCount = 0;
<del>var pending = 0;
<add>const responseErrors = {};
<add>let expectResponseCount = 0;
<add>let responseCount = 0;
<add>let pending = 0;
<ide>
<ide>
<del>function server(options, port) {
<del> var s = https.createServer(options, handler);
<add>function server(options) {
<add> const s = https.createServer(options, handler);
<ide> s.requests = [];
<ide> s.expectCount = 0;
<ide> return s;
<ide> function handler(req, res) {
<ide>
<ide> function listening() {
<ide> listenWait++;
<del> return function() {
<add> return () => {
<ide> listenWait--;
<ide> if (listenWait === 0) {
<ide> allListening();
<ide> function listening() {
<ide>
<ide> function makeReq(path, port, error, host, ca) {
<ide> pending++;
<del> var options = {
<add> const options = {
<ide> port: port,
<ide> path: path,
<ide> ca: ca
<ide> function makeReq(path, port, error, host, ca) {
<ide> if (host) {
<ide> options.headers = { host: host };
<ide> }
<del> var req = https.get(options);
<add> const req = https.get(options);
<ide> expectResponseCount++;
<del> var server = port === server1.address().port ? server1 :
<add> const server = port === server1.address().port ? server1 :
<ide> port === server2.address().port ? server2 :
<ide> port === server3.address().port ? server3 :
<ide> null;
<ide>
<ide> if (!server) throw new Error('invalid port: ' + port);
<ide> server.expectCount++;
<ide>
<del> req.on('response', function(res) {
<add> req.on('response', (res) => {
<ide> responseCount++;
<del> assert.equal(res.connection.authorizationError, error);
<add> assert.strictEqual(res.connection.authorizationError, error);
<ide> responseErrors[path] = res.connection.authorizationError;
<ide> pending--;
<ide> if (pending === 0) {
<ide> function allListening() {
<ide>
<ide> }
<ide>
<del>process.on('exit', function() {
<del> console.error(responseErrors);
<del> assert.equal(server1.requests.length, server1.expectCount);
<del> assert.equal(server2.requests.length, server2.expectCount);
<del> assert.equal(server3.requests.length, server3.expectCount);
<del> assert.equal(responseCount, expectResponseCount);
<add>process.on('exit', () => {
<add> assert.strictEqual(server1.requests.length, server1.expectCount);
<add> assert.strictEqual(server2.requests.length, server2.expectCount);
<add> assert.strictEqual(server3.requests.length, server3.expectCount);
<add> assert.strictEqual(responseCount, expectResponseCount);
<ide> }); | 1 |
Python | Python | support sentinel with ssl" " | 50ae4331cec1e2d61f536b406b0ebfefe7f1a495 | <ide><path>celery/backends/redis.py
<ide> class RedisBackend(BaseKeyValueStoreBackend, AsyncBackendMixin):
<ide>
<ide> #: :pypi:`redis` client module.
<ide> redis = redis
<add> connection_class_ssl = redis.SSLConnection if redis else None
<ide>
<ide> #: Maximum number of connections in the pool.
<ide> max_connections = None
<ide> def __init__(self, host=None, port=None, db=None, password=None,
<ide> ssl = _get('redis_backend_use_ssl')
<ide> if ssl:
<ide> self.connparams.update(ssl)
<del> self.connparams['connection_class'] = redis.SSLConnection
<add> self.connparams['connection_class'] = self.connection_class_ssl
<ide>
<ide> if url:
<ide> self.connparams = self._params_from_url(url, self.connparams)
<ide> def __init__(self, host=None, port=None, db=None, password=None,
<ide> # redis_backend_use_ssl dict, check ssl_cert_reqs is valid. If set
<ide> # via query string ssl_cert_reqs will be a string so convert it here
<ide> if ('connection_class' in self.connparams and
<del> self.connparams['connection_class'] is redis.SSLConnection):
<add> issubclass(self.connparams['connection_class'], redis.SSLConnection)):
<ide> ssl_cert_reqs_missing = 'MISSING'
<ide> ssl_string_to_constant = {'CERT_REQUIRED': CERT_REQUIRED,
<ide> 'CERT_OPTIONAL': CERT_OPTIONAL,
<ide> def __reduce__(self, args=(), kwargs=None):
<ide> )
<ide>
<ide>
<add>if getattr(redis, "sentinel", None):
<add> class SentinelManagedSSLConnection(
<add> redis.sentinel.SentinelManagedConnection,
<add> redis.SSLConnection):
<add> """Connect to a Redis server using Sentinel + TLS.
<add>
<add> Use Sentinel to identify which Redis server is the current master
<add> to connect to and when connecting to the Master server, use an
<add> SSL Connection.
<add> """
<add>
<add> pass
<add>
<add>
<ide> class SentinelBackend(RedisBackend):
<ide> """Redis sentinel task result store."""
<ide>
<ide> sentinel = getattr(redis, "sentinel", None)
<add> connection_class_ssl = SentinelManagedSSLConnection if sentinel else None
<ide>
<ide> def __init__(self, *args, **kwargs):
<ide> if self.sentinel is None:
<ide><path>t/unit/backends/test_redis.py
<ide> def test_get_pool(self):
<ide> )
<ide> pool = x._get_pool(**x.connparams)
<ide> assert pool
<add>
<add> def test_backend_ssl(self):
<add> pytest.importorskip('redis')
<add>
<add> from celery.backends.redis import SentinelBackend
<add> self.app.conf.redis_backend_use_ssl = {
<add> 'ssl_cert_reqs': "CERT_REQUIRED",
<add> 'ssl_ca_certs': '/path/to/ca.crt',
<add> 'ssl_certfile': '/path/to/client.crt',
<add> 'ssl_keyfile': '/path/to/client.key',
<add> }
<add> self.app.conf.redis_socket_timeout = 30.0
<add> self.app.conf.redis_socket_connect_timeout = 100.0
<add> x = SentinelBackend(
<add> 'sentinel://:bosco@vandelay.com:123//1', app=self.app,
<add> )
<add> assert x.connparams
<add> assert len(x.connparams['hosts']) == 1
<add> assert x.connparams['hosts'][0]['host'] == 'vandelay.com'
<add> assert x.connparams['hosts'][0]['db'] == 1
<add> assert x.connparams['hosts'][0]['port'] == 123
<add> assert x.connparams['hosts'][0]['password'] == 'bosco'
<add> assert x.connparams['socket_timeout'] == 30.0
<add> assert x.connparams['socket_connect_timeout'] == 100.0
<add> assert x.connparams['ssl_cert_reqs'] == ssl.CERT_REQUIRED
<add> assert x.connparams['ssl_ca_certs'] == '/path/to/ca.crt'
<add> assert x.connparams['ssl_certfile'] == '/path/to/client.crt'
<add> assert x.connparams['ssl_keyfile'] == '/path/to/client.key'
<add>
<add> from celery.backends.redis import SentinelManagedSSLConnection
<add> assert x.connparams['connection_class'] is SentinelManagedSSLConnection | 2 |
Text | Text | remove meaningless 4 in use a retina image guide | d6b8c5fcee04741618ce69a8049c0676b60f8fed | <ide><path>guide/english/certifications/responsive-web-design/responsive-web-design-principles/use-a-retina-image-for-higher-resolution-displays/index.md
<ide> Following the instructions:
<ide>
<ide> Set the width and height of the img tag to half of their original values. In this case, both the original height and the original width are 200px.
<ide>
<del>the style 4 becomes:
<add>the style becomes:
<ide>
<ide> ```css
<ide> <style> | 1 |
Python | Python | prevent empty `queryset`s to raises assertionerror | 6f66798ad3242c742ab0c5edcc038a9c7b469c2e | <ide><path>rest_framework/permissions.py
<ide> def has_permission(self, request, view):
<ide> if queryset is None and getattr(view, '_ignore_model_permissions', False):
<ide> return True
<ide>
<del> assert queryset, (
<add> assert queryset is not None, (
<ide> 'Cannot apply DjangoModelPermissions on a view that '
<ide> 'does not have `.queryset` property.'
<ide> )
<ide><path>tests/test_permissions.py
<ide> def get_queryset(self):
<ide> return BasicModel.objects.all()
<ide>
<ide>
<add>class EmptyListView(generics.ListCreateAPIView):
<add> queryset = BasicModel.objects.none()
<add> serializer_class = BasicSerializer
<add> authentication_classes = [authentication.BasicAuthentication]
<add> permission_classes = [permissions.DjangoModelPermissions]
<add>
<add>
<ide> root_view = RootView.as_view()
<ide> instance_view = InstanceView.as_view()
<ide> get_queryset_list_view = GetQuerySetListView.as_view()
<add>empty_list_view = EmptyListView.as_view()
<ide>
<ide>
<ide> def basic_auth_header(username, password):
<ide> def test_options_updateonly(self):
<ide> self.assertIn('actions', response.data)
<ide> self.assertEqual(list(response.data['actions'].keys()), ['PUT'])
<ide>
<add> def test_empty_view_does_not_assert(self):
<add> request = factory.get('/1', HTTP_AUTHORIZATION=self.permitted_credentials)
<add> response = empty_list_view(request, pk=1)
<add> self.assertEqual(response.status_code, status.HTTP_200_OK)
<add>
<ide>
<ide> class BasicPermModel(models.Model):
<ide> text = models.CharField(max_length=100) | 2 |
Ruby | Ruby | improve test for leaky scope chain | 4df806f95f8c64c8a1406aebf5f82807ff76c611 | <ide><path>activerecord/test/cases/reflection_test.rb
<ide> def test_scope_chain_of_polymorphic_association_does_not_leak_into_other_hmt_ass
<ide> drink = department.chefs.create!(employable: DrinkDesigner.create!)
<ide> Recipe.create!(chef_id: drink.id, hotel_id: hotel.id)
<ide>
<add> expected_sql = capture_sql { hotel.recipes.to_a }
<add>
<add> Hotel.reflect_on_association(:recipes).clear_association_scope_cache
<add> hotel.reload
<ide> hotel.drink_designers.to_a
<del> assert_sql(/^(?!.*employable_type).*$/) { hotel.recipes.to_a }
<add> loaded_sql = capture_sql { hotel.recipes.to_a }
<add>
<add> assert_equal expected_sql, loaded_sql
<ide> end
<ide>
<ide> def test_nested? | 1 |
Text | Text | update doc about assets digest class [ci skip] | 8ee7d5e1fcea69713ab7d1389fbdf3944bf8a4c7 | <ide><path>guides/source/configuring.md
<ide> pipeline is enabled. It is set to `true` by default.
<ide>
<ide> * `config.assets.manifest` defines the full path to be used for the asset precompiler's manifest file. Defaults to a file named `manifest-<random>.json` in the `config.assets.prefix` directory within the public folder.
<ide>
<del>* `config.assets.digest` enables the use of MD5 fingerprints in asset names. Set to `true` by default.
<add>* `config.assets.digest` enables the use of SHA256 fingerprints in asset names. Set to `true` by default.
<ide>
<ide> * `config.assets.debug` disables the concatenation and compression of assets. Set to `true` by default in `development.rb`.
<ide>
<del>* `config.assets.version` is an option string that is used in MD5 hash generation. This can be changed to force all files to be recompiled.
<add>* `config.assets.version` is an option string that is used in SHA256 hash generation. This can be changed to force all files to be recompiled.
<ide>
<ide> * `config.assets.compile` is a boolean that can be used to turn on live Sprockets compilation in production.
<ide> | 1 |
PHP | PHP | fix fatal error handling | 275a6184a2b64684380151d938bb8f5478fb086f | <ide><path>lib/Cake/Core/App.php
<ide> public static function shutdown() {
<ide> if (static::$_objectCacheChange) {
<ide> Cache::write('object_map', static::$_objects, '_cake_core_');
<ide> }
<del> static::_checkFatalError();
<del> }
<del>
<del>/**
<del> * Check if a fatal error happened and trigger the configured handler if configured
<del> *
<del> * @return void
<del> */
<del> protected static function _checkFatalError() {
<del> $lastError = error_get_last();
<del> if (!is_array($lastError)) {
<del> return;
<del> }
<del>
<del> list(, $log) = ErrorHandler::mapErrorCode($lastError['type']);
<del> if ($log !== LOG_ERR) {
<del> return;
<del> }
<del>
<del> if (PHP_SAPI === 'cli') {
<del> $errorHandler = Configure::read('Error.consoleHandler');
<del> } else {
<del> $errorHandler = Configure::read('Error.handler');
<del> }
<del> if (!is_callable($errorHandler)) {
<del> return;
<del> }
<del> call_user_func($errorHandler, $lastError['type'], $lastError['message'], $lastError['file'], $lastError['line'], array());
<ide> }
<ide>
<ide> }
<ide><path>lib/Cake/Error/BaseErrorHandler.php
<ide> public function register() {
<ide> error_reporting($level);
<ide> set_error_handler([$this, 'handleError'], $level);
<ide> set_exception_handler([$this, 'handleException']);
<add> register_shutdown_function(function () {
<add> $error = error_get_last();
<add> if (!is_array($error)) {
<add> return;
<add> }
<add> $fatals = [
<add> E_USER_ERROR,
<add> E_ERROR,
<add> E_PARSE,
<add> ];
<add> if (!in_array($error['type'], $fatals, true)) {
<add> return;
<add> }
<add> $this->handleFatalError(
<add> $error['type'],
<add> $error['message'],
<add> $error['file'],
<add> $error['line']
<add> );
<add> });
<ide> }
<ide>
<ide> /** | 2 |
Javascript | Javascript | increase test timeout | beadd8e14fd9e60b7abeda0edece28fdeafb1c40 | <ide><path>test/TestCases.test.js
<ide> describe("TestCases", function() {
<ide> }).forEach(function(testName) {
<ide> var suite = describe(testName, function() {});
<ide> it(testName + " should compile", function(done) {
<del> this.timeout(10000);
<ide> var testDirectory = path.join(casesPath, category.name, testName);
<ide> var outputDirectory = path.join(__dirname, "js", config.name, category.name, testName);
<ide> var options = { | 1 |
Text | Text | fix punctuation [ci skip] | 6f9d7f346b218d559372797aff3accf5141db711 | <ide><path>guides/source/asset_pipeline.md
<ide> generates something like this:
<ide> rel="stylesheet" />
<ide> ```
<ide>
<del>Note: with the Asset Pipeline the :cache and :concat options aren't used
<add>NOTE: with the Asset Pipeline the `:cache` and `:concat` options aren't used
<ide> anymore, delete these options from the `javascript_include_tag` and
<ide> `stylesheet_link_tag`.
<ide> | 1 |
Text | Text | improve arabic translation for /react/props | a70164b08164a8630ac28dc509ccefac4205e770 | <ide><path>guide/arabic/react/props/index.md
<ide> ---
<ide> title: Props
<del>localeTitle: الدعائم
<add>localeTitle: الخصائص
<ide> ---
<del>### ما هي الدعائم؟
<add>### ما هي الخصائص (props)؟
<ide>
<del>الدعائم (اختصار الخصائص) هي التاريخ الذي تم تمريره إلى المكون. هم غير قابل للتغيير (للقراءة فقط).
<ide>\ No newline at end of file
<add>اختصار properties وهي البيانات التي يتم تمريرها إلى المكوّن. الخصائص غير قابل للتغيير من نفس العنصر وهي متوفرة بصيغة للقراءة فقط). | 1 |
PHP | PHP | fix strict errors and missing imports in sqlite | 538c9988706f4628cd8a71039ef71aa15ce249ff | <ide><path>lib/Cake/Model/Datasource/Database/Sqlite.php
<ide> <?php
<ide> /**
<del> * SQLite layer for DBO
<del> *
<del> * PHP 5
<del> *
<ide> * CakePHP(tm) : Rapid Development Framework (http://cakephp.org)
<ide> * Copyright 2005-2012, Cake Software Foundation, Inc. (http://cakefoundation.org)
<ide> *
<ide> *
<ide> * @copyright Copyright 2005-2012, Cake Software Foundation, Inc. (http://cakefoundation.org)
<ide> * @link http://cakephp.org CakePHP(tm) Project
<del> * @package Cake.Model.Datasource.Database
<ide> * @since CakePHP(tm) v 0.9.0
<ide> * @license MIT License (http://www.opensource.org/licenses/mit-license.php)
<ide> */
<ide> namespace Cake\Model\Datasource\Database;
<add>
<ide> use Cake\Error;
<ide> use Cake\Model\Datasource\DboSource;
<add>use Cake\Model\Model;
<add>use Cake\Model\Schema;
<add>use Cake\Utility\String;
<ide> use \PDO;
<ide>
<ide> /** | 1 |
Text | Text | use details tags to clean up mailhog section | be2bb093d4505b3bd2bafdb38114474424bc37fe | <ide><path>docs/how-to-catch-outgoing-emails-locally.md
<ide> > **Note:** This is an **optional** step and is required only when working with email workflows
<ide>
<add>- [Introduction](#introduction)
<add>- [Installing MailHog](#installing-mailhog)
<add>- [Using MailHog](#using-mailhog)
<add>- [Useful Links](#useful-links)
<add>
<ide> ## Introduction
<ide>
<ide> Some email workflows, like updating a user's email, requires the back-end api-server to send outgoing emails. An alternative to using an email service provider to send actual email messages, Mailhog is a developer tool for email testing that will catch the email messages sent by your freeCodeCamp instance.
<ide>
<ide> ## Installing MailHog
<ide>
<del>MailHog can be installed on macOS, Windows and Linux.
<del>
<del>- [Introduction](#introduction)
<del>- [Installing MailHog](#installing-mailhog)
<del> - [Installing MailHog with Docker](#installing-mailhog-with-docker)
<del> - [Installing MailHog on macOS](#installing-mailhog-on-macos)
<del> - [Installing MailHog on Windows](#installing-mailhog-on-windows)
<del> - [Installing MailHog on Linux](#installing-mailhog-on-linux)
<del>- [Using MailHog](#using-mailhog)
<del>- [Useful Links](#useful-links)
<add>MailHog can be installed on macOS, Windows and Linux or used via Docker
<ide>
<del>### Installing MailHog with Docker
<add><details><summary>Installing MailHog with Docker</summary>
<ide>
<ide> If you have Docker installed then you can use
<ide>
<ide> ```bash
<ide> docker run -d --name mailhog --rm mailhog/mailhog
<ide> ```
<ide>
<del>to start MailHog in the background and
<add>to start MailHog in the background and
<ide>
<ide> ```bash
<ide> docker stop mailhog
<ide> docker stop mailhog
<ide> to stop it.
<ide>
<ide> When the installation completes, you can start [using MailHog](#using-mailhog).
<add></details>
<ide>
<del>### Installing MailHog on macOS
<add><details><summary>Installing MailHog on macOS</summary>
<ide>
<ide> Install MailHog on macOS with [Homebrew](https://brew.sh/):
<ide>
<ide> brew services start mailhog
<ide> The above commands will start a mailhog service in the background.
<ide>
<ide> When the installation completes, you can start [using MailHog](#using-mailhog).
<add></details>
<ide>
<del>### Installing MailHog on Windows
<add><details><summary>Installing MailHog on Windows</summary>
<ide>
<ide> Download the latest version of MailHog from [MailHog's official repository](https://github.com/mailhog/MailHog/releases). Locate and click on the link for your Windows version (32 or 64 bit) and a .exe file will be downloaded to your computer.
<ide>
<ide> When the download completes, click to open the file. A Windows firewall notifica
<ide> Close MailHog by closing the command prompt window. To start MailHog again, click on the MailHog executable (.exe) file that was downloaded initially - it is not necessary to download a new MailHog installation file.
<ide>
<ide> Start [using MailHog](#using-mailhog).
<add></details>
<ide>
<del>### Installing MailHog on Linux
<add><details><summary>Installing MailHog on Linux</summary>
<ide>
<ide> First, install [Go](https://golang.org).
<ide>
<ide> mailhog
<ide> ```
<ide>
<ide> Start [using MailHog](#using-mailhog).
<add></details>
<ide>
<ide> ## Using MailHog
<ide> | 1 |
Javascript | Javascript | invoke callbacks with undefined context | 2249234fee2b4f0ce204859728135b9767538383 | <ide><path>lib/fs.js
<ide> function makeCallback(cb) {
<ide> }
<ide>
<ide> return function() {
<del> return cb.apply(null, arguments);
<add> return cb.apply(undefined, arguments);
<ide> };
<ide> }
<ide>
<ide><path>test/parallel/test-fs-mkdtemp.js
<ide> assert(common.fileExists(utf8));
<ide> function handler(err, folder) {
<ide> assert.ifError(err);
<ide> assert(common.fileExists(folder));
<del> assert.strictEqual(this, null);
<add> assert.strictEqual(this, undefined);
<ide> }
<ide>
<ide> fs.mkdtemp(path.join(common.tmpDir, 'bar.'), common.mustCall(handler));
<ide><path>test/parallel/test-fs-stat.js
<ide> fs.open('.', 'r', undefined, common.mustCall(function(err, fd) {
<ide> // Confirm that we are not running in the context of the internal binding
<ide> // layer.
<ide> // Ref: https://github.com/nodejs/node/commit/463d6bac8b349acc462d345a6e298a76f7d06fb1
<del> assert.strictEqual(this, null);
<add> assert.strictEqual(this, undefined);
<ide> }));
<ide>
<ide> // fstatSync | 3 |
Python | Python | fix parametric testing for python 2.4 | 47221fe49a26140a57c6af6569afbaf055db15c8 | <ide><path>numpy/testing/parametric.py
<ide> def run(self, result=None):
<ide>
<ide> if result is None: result = self.defaultTestResult()
<ide>
<add> try:
<add> _testMethodName = getattr(self,"_testMethodName")
<add> except:
<add> _testMethodName = getattr(self,"_TestCase__testMethodName")
<add>
<ide> # Independent tests: each gets its own setup/teardown
<del> _testMethodName = getattr(self,"_testMethodName", "runTest")
<ide> if _testMethodName.startswith(self._indepParTestPrefix):
<ide> for t in getattr(self,_testMethodName)():
<ide> self.run_test(t,result) | 1 |
Python | Python | add svg to the 'make server' known types list | 03a39b899a84c0dfc8fac338b4d87529f478df01 | <ide><path>test/test.py
<ide> def prompt(question):
<ide> '.html': 'text/html',
<ide> '.js': 'application/javascript',
<ide> '.json': 'application/json',
<add> '.svg': 'image/svg+xml',
<ide> '.pdf': 'application/pdf',
<ide> '.xhtml': 'application/xhtml+xml',
<ide> '.ico': 'image/x-icon', | 1 |
Javascript | Javascript | fix regex to include .jsx | 29575b99b78eb0f2afa94d04941fddd4fb779f7d | <ide><path>examples/counter/webpack.config.js
<ide> module.exports = {
<ide> },
<ide> module: {
<ide> loaders: [{
<del> test: /\.js?$/,
<add> test: /\.jsx?$/,
<ide> loaders: ['react-hot', 'babel'],
<ide> exclude: /node_modules/
<ide> }] | 1 |
Javascript | Javascript | emit flush events on react native for reactperf | 49a1542c9f82b2934bd96374b222bcd86ab9d044 | <ide><path>src/renderers/native/ReactNativeMount.js
<ide> var ReactNativeMount = {
<ide> // Mute future events from the top level wrapper.
<ide> // It is an implementation detail that devtools should not know about.
<ide> instance._debugID = 0;
<add>
<add> if (__DEV__) {
<add> ReactInstrumentation.debugTool.onBeginFlush();
<add> }
<ide> }
<ide>
<ide> // The initial render is synchronous but any updates that happen during
<ide> var ReactNativeMount = {
<ide> ReactInstrumentation.debugTool.onMountRootComponent(
<ide> instance._renderedComponent._debugID
<ide> );
<add> ReactInstrumentation.debugTool.onEndFlush();
<ide> }
<ide> var component = instance.getPublicInstance();
<ide> if (callback) { | 1 |
Java | Java | remove jetbrains annotations inserted by ide | e214d69350d374a769e59c867f2c50488caf5980 | <ide><path>spring-webflux/src/main/java/org/springframework/web/reactive/result/method/annotation/ModelInitializer.java
<ide> import java.util.Optional;
<ide> import java.util.stream.Collectors;
<ide>
<del>import org.jetbrains.annotations.NotNull;
<ide> import reactor.core.publisher.Mono;
<ide>
<ide> import org.springframework.core.Conventions;
<ide> public Mono<Void> initModel(HandlerMethod handlerMethod, InitBinderBindingContex
<ide> });
<ide> }
<ide>
<del> @NotNull
<ide> private Mono<Void> invokeModelAttributeMethods(BindingContext bindingContext,
<ide> List<InvocableHandlerMethod> modelMethods, ServerWebExchange exchange) {
<ide>
<ide><path>spring-webflux/src/test/java/org/springframework/web/reactive/result/method/annotation/ModelInitializerTests.java
<ide> import java.util.Map;
<ide> import java.util.stream.Collectors;
<ide>
<del>import org.jetbrains.annotations.NotNull;
<ide> import org.junit.Before;
<ide> import org.junit.Test;
<ide> import reactor.core.publisher.Mono;
<ide> public void clearModelAttributeFromSession() throws Exception {
<ide> }
<ide>
<ide>
<del> @NotNull
<ide> private InitBinderBindingContext getBindingContext(Object controller) {
<ide>
<ide> List<SyncInvocableHandlerMethod> binderMethods = | 2 |
Python | Python | fix lemma ordering in test | 102f7979338b948744b6af06689f928deb72f27c | <ide><path>spacy/tests/regression/test_issue781.py
<ide>
<ide> # Note: "chromosomes" worked previous the bug fix
<ide> @pytest.mark.models('en')
<del>@pytest.mark.parametrize('word,lemmas', [("chromosomes", ["chromosome"]), ("endosomes", ["endosome"]), ("colocalizes", ["colocalize", "colocaliz"])])
<add>@pytest.mark.parametrize('word,lemmas', [("chromosomes", ["chromosome"]), ("endosomes", ["endosome"]), ("colocalizes", ["colocaliz", "colocalize"])])
<ide> def test_issue781(EN, word, lemmas):
<ide> lemmatizer = EN.Defaults.create_lemmatizer()
<ide> assert lemmatizer(word, 'noun', morphology={'number': 'plur'}) == lemmas | 1 |
Python | Python | improve clarity of in1d docstring | 6a3c80fb11d60569328e751bab7b664ed6a7967e | <ide><path>numpy/lib/arraysetops.py
<ide> def in1d(ar1, ar2, assume_unique=False, invert=False, kind=None):
<ide> to (but is faster than) ``np.invert(in1d(a, b))``.
<ide> kind : {None, 'sort', 'table'}, optional
<ide> The algorithm to use. This will not affect the final result,
<del> but will affect the speed. Default will select automatically
<del> based on memory considerations.
<add> but will affect the speed and memory use. The default, None,
<add> will select automatically based on memory considerations.
<ide>
<ide> * If 'sort', will use a mergesort-based approach. This will have
<ide> a memory usage of roughly 6 times the sum of the sizes of
<ide> def in1d(ar1, ar2, assume_unique=False, invert=False, kind=None):
<ide> to be the faster method if the following formula is true:
<ide> ``log10(len(ar2)) > (log10(max(ar2)-min(ar2)) - 2.27) / 0.927``,
<ide> but may use greater memory.
<del> * If `None`, will automatically choose 'table' if
<add> * If None, will automatically choose 'table' if
<ide> the required memory allocation is less than or equal to
<ide> 6 times the sum of the sizes of `ar1` and `ar2`,
<ide> otherwise will use 'sort'. This is done to not use
<ide> def isin(element, test_elements, assume_unique=False, invert=False,
<ide> than) ``np.invert(np.isin(a, b))``.
<ide> kind : {None, 'sort', 'table'}, optional
<ide> The algorithm to use. This will not affect the final result,
<del> but will affect the speed. Default will select automatically
<del> based on memory considerations.
<add> but will affect the speed and memory use. The default, None,
<add> will select automatically based on memory considerations.
<ide>
<ide> * If 'sort', will use a mergesort-based approach. This will have
<ide> a memory usage of roughly 6 times the sum of the sizes of
<ide> def isin(element, test_elements, assume_unique=False, invert=False,
<ide> to be the faster method if the following formula is true:
<ide> ``log10(len(ar2)) > (log10(max(ar2)-min(ar2)) - 2.27) / 0.927``,
<ide> but may use greater memory.
<del> * If `None`, will automatically choose 'table' if
<add> * If None, will automatically choose 'table' if
<ide> the required memory allocation is less than or equal to
<ide> 6 times the sum of the sizes of `ar1` and `ar2`,
<ide> otherwise will use 'sort'. This is done to not use | 1 |
Javascript | Javascript | use fixture files | 259e939518f7ba04215c97af50bda06b8d0fe6d6 | <ide><path>node-tests/blueprints/mixin-test-test.js
<ide> const modifyPackages = blueprintHelpers.modifyPackages;
<ide> const chai = require('ember-cli-blueprint-test-helpers/chai');
<ide> const expect = chai.expect;
<ide>
<add>const fixture = require('../helpers/fixture');
<add>
<ide> describe('Blueprint: mixin-test', function() {
<ide> setupTestHooks(this);
<ide>
<ide> describe('Blueprint: mixin-test', function() {
<ide> it('mixin-test foo', function() {
<ide> return emberGenerateDestroy(['mixin-test', 'foo'], _file => {
<ide> expect(_file('tests/unit/mixins/foo-test.js'))
<del> .to.contain("import FooMixin from 'my-app/mixins/foo';");
<add> .to.equal(fixture('mixin-test/default.js'));
<ide> });
<ide> });
<ide>
<ide> describe('Blueprint: mixin-test', function() {
<ide> it('mixin-test foo', function() {
<ide> return emberGenerateDestroy(['mixin-test', 'foo'], _file => {
<ide> expect(_file('tests/unit/mixins/foo-test.js'))
<del> .to.contain("import { describe, it } from 'mocha';")
<del> .to.contain("import FooMixin from 'my-app/mixins/foo';")
<del> .to.contain("describe('Unit | Mixin | foo', function() {");
<add> .to.equal(fixture('mixin-test/mocha.js'));
<ide> });
<ide> });
<ide> });
<ide> describe('Blueprint: mixin-test', function() {
<ide> it('mixin-test foo', function() {
<ide> return emberGenerateDestroy(['mixin-test', 'foo'], _file => {
<ide> expect(_file('tests/unit/mixins/foo-test.js'))
<del> .to.contain("import FooMixin from 'my-addon/mixins/foo';");
<add> .to.equal(fixture('mixin-test/addon.js'));
<ide> });
<ide> });
<ide> });
<ide><path>node-tests/fixtures/mixin-test/addon.js
<add>import EmberObject from '@ember/object';
<add>import FooMixin from 'my-addon/mixins/foo';
<add>import { module, test } from 'qunit';
<add>
<add>module('Unit | Mixin | foo');
<add>
<add>// Replace this with your real tests.
<add>test('it works', function(assert) {
<add> let FooObject = EmberObject.extend(FooMixin);
<add> let subject = FooObject.create();
<add> assert.ok(subject);
<add>});
<ide><path>node-tests/fixtures/mixin-test/default.js
<add>import EmberObject from '@ember/object';
<add>import FooMixin from 'my-app/mixins/foo';
<add>import { module, test } from 'qunit';
<add>
<add>module('Unit | Mixin | foo');
<add>
<add>// Replace this with your real tests.
<add>test('it works', function(assert) {
<add> let FooObject = EmberObject.extend(FooMixin);
<add> let subject = FooObject.create();
<add> assert.ok(subject);
<add>});
<ide><path>node-tests/fixtures/mixin-test/mocha.js
<add>import { expect } from 'chai';
<add>import { describe, it } from 'mocha';
<add>import EmberObject from '@ember/object';
<add>import FooMixin from 'my-app/mixins/foo';
<add>
<add>describe('Unit | Mixin | foo', function() {
<add> // Replace this with your real tests.
<add> it('works', function() {
<add> let FooObject = EmberObject.extend(FooMixin);
<add> let subject = FooObject.create();
<add> expect(subject).to.be.ok;
<add> });
<add>}); | 4 |
Mixed | Ruby | remove deprecated arguments from `#verify!` | 9c6ee1bed0292fc32c23dc1c68951ae64fc510be | <ide><path>activerecord/CHANGELOG.md
<add>* Remove deprecated arguments from `#verify!`.
<add>
<add> *Rafael Mendonça França*
<add>
<ide> * Remove deprecated argument `name` from `#indexes`.
<ide>
<ide> *Rafael Mendonça França*
<ide><path>activerecord/lib/active_record/connection_adapters/abstract_adapter.rb
<ide> def requires_reloading?
<ide> # Checks whether the connection to the database is still active (i.e. not stale).
<ide> # This is done under the hood by calling #active?. If the connection
<ide> # is no longer active, then this method will reconnect to the database.
<del> def verify!(*ignored)
<del> if ignored.size > 0
<del> ActiveSupport::Deprecation.warn("Passing arguments to #verify method of the connection has no effect and has been deprecated. Please remove all arguments from the #verify method call.")
<del> end
<add> def verify!
<ide> reconnect! unless active?
<ide> end
<ide>
<ide><path>activerecord/test/cases/adapters/mysql2/connection_test.rb
<ide> def test_successful_reconnection_after_timeout_with_verify
<ide> assert @connection.active?
<ide> end
<ide>
<del> def test_verify_with_args_is_deprecated
<del> assert_deprecated do
<del> @connection.verify!(option: true)
<del> end
<del> assert_deprecated do
<del> @connection.verify!([])
<del> end
<del> assert_deprecated do
<del> @connection.verify!({})
<del> end
<del> end
<del>
<ide> def test_execute_after_disconnect
<ide> @connection.disconnect!
<ide> | 3 |
Javascript | Javascript | add support for $route.reload() | 9fd3dfe49d283c136e29bf60c0da6d4fe2aaed3d | <ide><path>src/services.js
<ide> angularServiceInject('$route', function(location) {
<ide> */
<ide> otherwise: function(params) {
<ide> $route.when(null, params);
<add> },
<add>
<add> /**
<add> * @workInProgress
<add> * @ngdoc method
<add> * @name angular.service.$route#reload
<add> * @methodOf angular.service.$route
<add> *
<add> * @description
<add> * Causes `$route` service to reload (and recreate the `$route.current` scope) upon the next
<add> * eval even if {@link angular.service.$location $location} hasn't changed.
<add> */
<add> reload: function() {
<add> dirty++;
<ide> }
<ide> };
<ide> function updateRoute(){
<ide><path>test/servicesSpec.js
<ide> describe("service", function(){
<ide> expect($route.current.template).toBe('foo.html');
<ide> expect($route.current.scope.$parent).toBe(parentScope);
<ide> });
<add>
<add> it('should reload routes when reload() is called', function() {
<add> var scope = angular.scope(),
<add> $location = scope.$service('$location'),
<add> $route = scope.$service('$route'),
<add> onChangeSpy = jasmine.createSpy('onChange');
<add>
<add> $route.when('', {template: 'foo.html'});
<add> $route.onChange(onChangeSpy);
<add> expect($route.current).toBeNull();
<add> expect(onChangeSpy).not.toHaveBeenCalled();
<add>
<add> scope.$eval();
<add>
<add> expect($location.hash).toBe('');
<add> expect($route.current.template).toBe('foo.html');
<add> expect(onChangeSpy.callCount).toBe(1);
<add>
<add> $route.reload();
<add> scope.$eval();
<add>
<add> expect($location.hash).toBe('');
<add> expect($route.current.template).toBe('foo.html');
<add> expect(onChangeSpy.callCount).toBe(2);
<add> });
<ide> });
<ide>
<ide> | 2 |
Python | Python | handle empty tasklist. closes #873 | e7be679471f4f79ccd0caf556d0335db84877f0e | <ide><path>celery/app/builtins.py
<ide> def run(self, tasks, result, group_id, partial_args):
<ide> return result
<ide>
<ide> def prepare(self, options, tasks, args, **kwargs):
<add> AsyncResult = self.AsyncResult
<ide> options['group_id'] = group_id = \
<ide> options.setdefault('task_id', uuid())
<ide>
<ide> def prepare_member(task):
<ide> tid = opts['task_id']
<ide> except KeyError:
<ide> tid = opts['task_id'] = uuid()
<del> return task, self.AsyncResult(tid)
<add> return task, AsyncResult(tid)
<ide>
<del> tasks, results = zip(*[prepare_member(task) for task in tasks])
<add> try:
<add> tasks, results = zip(*[prepare_member(task) for task in tasks])
<add> except ValueError: # tasks empty
<add> tasks, results = [], []
<ide> return (tasks, self.app.GroupResult(group_id, results),
<ide> group_id, args)
<ide> | 1 |
Mixed | Ruby | update assert_redirected_to docs [ci skip] | 21575ddc32dedd358b938c6c66d0c8393a341434 | <ide><path>actionpack/lib/action_dispatch/testing/assertions/response.rb
<ide> def assert_response(type, message = nil)
<ide> end
<ide> end
<ide>
<del> # Asserts that the redirection options passed in match those of the redirect called in the latest action.
<del> # This match can be partial, such that <tt>assert_redirected_to(controller: "weblog")</tt> will also
<del> # match the redirection of <tt>redirect_to(controller: "weblog", action: "show")</tt> and so on.
<add> # Asserts that the response is a redirect to a URL matching the given options.
<ide> #
<ide> # # Asserts that the redirection was to the "index" action on the WeblogController
<ide> # assert_redirected_to controller: "weblog", action: "index"
<ide><path>guides/source/testing.md
<ide> Rails adds some custom assertions of its own to the `minitest` framework:
<ide> | [`assert_recognizes(expected_options, path, extras={}, message=nil)`](https://api.rubyonrails.org/classes/ActionDispatch/Assertions/RoutingAssertions.html#method-i-assert_recognizes) | Asserts that the routing of the given path was handled correctly and that the parsed options (given in the expected_options hash) match path. Basically, it asserts that Rails recognizes the route given by expected_options.|
<ide> | [`assert_generates(expected_path, options, defaults={}, extras = {}, message=nil)`](https://api.rubyonrails.org/classes/ActionDispatch/Assertions/RoutingAssertions.html#method-i-assert_generates) | Asserts that the provided options can be used to generate the provided path. This is the inverse of assert_recognizes. The extras parameter is used to tell the request the names and values of additional request parameters that would be in a query string. The message parameter allows you to specify a custom error message for assertion failures.|
<ide> | [`assert_response(type, message = nil)`](https://api.rubyonrails.org/classes/ActionDispatch/Assertions/ResponseAssertions.html#method-i-assert_response) | Asserts that the response comes with a specific status code. You can specify `:success` to indicate 200-299, `:redirect` to indicate 300-399, `:missing` to indicate 404, or `:error` to match the 500-599 range. You can also pass an explicit status number or its symbolic equivalent. For more information, see [full list of status codes](http://rubydoc.info/github/rack/rack/master/Rack/Utils#HTTP_STATUS_CODES-constant) and how their [mapping](https://rubydoc.info/github/rack/rack/master/Rack/Utils#SYMBOL_TO_STATUS_CODE-constant) works.|
<del>| [`assert_redirected_to(options = {}, message=nil)`](https://api.rubyonrails.org/classes/ActionDispatch/Assertions/ResponseAssertions.html#method-i-assert_redirected_to) | Asserts that the redirection options passed in match those of the redirect called in the latest action. This match can be partial, such that `assert_redirected_to(controller: "weblog")` will also match the redirection of `redirect_to(controller: "weblog", action: "show")` and so on. You can also pass named routes such as `assert_redirected_to root_path` and Active Record objects such as `assert_redirected_to @article`.|
<add>| [`assert_redirected_to(options = {}, message=nil)`](https://api.rubyonrails.org/classes/ActionDispatch/Assertions/ResponseAssertions.html#method-i-assert_redirected_to) | Asserts that the response is a redirect to a URL matching the given options. You can also pass named routes such as `assert_redirected_to root_path` and Active Record objects such as `assert_redirected_to @article`.|
<ide>
<ide> You'll see the usage of some of these assertions in the next chapter.
<ide> | 2 |
Java | Java | use custom config read in reactortcpclient | a372b683cde47651fcec6ef0cec9004d79c4975e | <ide><path>spring-messaging/src/main/java/org/springframework/messaging/simp/stomp/StompBrokerRelayMessageHandler.java
<ide>
<ide> package org.springframework.messaging.simp.stomp;
<ide>
<add>import java.io.IOException;
<ide> import java.util.Collection;
<ide> import java.util.Map;
<ide> import java.util.concurrent.Callable;
<ide><path>spring-messaging/src/main/java/org/springframework/messaging/tcp/reactor/ReactorTcpClient.java
<ide>
<ide> import java.lang.reflect.Modifier;
<ide> import java.net.InetSocketAddress;
<add>import java.util.Arrays;
<add>import java.util.Collections;
<add>import java.util.Properties;
<ide>
<ide> import org.apache.commons.logging.Log;
<ide> import org.apache.commons.logging.LogFactory;
<ide> import reactor.core.composable.Promise;
<ide> import reactor.core.composable.Stream;
<ide> import reactor.core.composable.spec.Promises;
<add>import reactor.core.configuration.ConfigurationReader;
<add>import reactor.core.configuration.DispatcherConfiguration;
<add>import reactor.core.configuration.ReactorConfiguration;
<ide> import reactor.function.Consumer;
<ide> import reactor.function.support.SingleUseConsumer;
<ide> import reactor.io.Buffer;
<ide> public ReactorTcpClient(String host, int port, Codec<Buffer, Message<P>, Message<P>> codec) {
<ide>
<ide> // Revisit in 1.1: is Environment still required w/ sync dispatcher?
<del> this.environment = new Environment();
<add> this.environment = new Environment(new SynchronousDispatcherConfigReader());
<ide>
<ide> this.tcpClient = new TcpClientSpec<Message<P>, Message<P>>(REACTOR_TCP_CLIENT_TYPE)
<ide> .env(this.environment)
<ide> .codec(codec)
<ide> .connect(host, port)
<del> .synchronousDispatcher()
<ide> .get();
<ide>
<ide> checkReactorVersion();
<ide> protected Void adapt(Void result) {
<ide> }
<ide> }
<ide>
<add>
<add> /**
<add> * A ConfigurationReader that enforces the use of a SynchronousDispatcher.
<add> *
<add> * <p>The {@link reactor.core.configuration.PropertiesConfigurationReader} used by
<add> * default automatically creates other dispatchers with thread pools that are
<add> * not needed.
<add> */
<add> private static class SynchronousDispatcherConfigReader implements ConfigurationReader {
<add>
<add> @Override
<add> public ReactorConfiguration read() {
<add> return new ReactorConfiguration(Arrays.<DispatcherConfiguration>asList(), "sync", new Properties());
<add> }
<add> }
<add>
<ide> }
<ide>\ No newline at end of file | 2 |
Python | Python | move displacy tests to own file | c3df4d1108cfec0f5b612bb426bf7a0a9220960f | <ide><path>spacy/tests/test_displacy.py
<add># coding: utf-8
<add>from __future__ import unicode_literals
<add>
<add>import pytest
<add>from spacy import displacy
<add>from spacy.tokens import Span
<add>
<add>from .util import get_doc
<add>
<add>
<add>def test_displacy_parse_ents(en_vocab):
<add> """Test that named entities on a Doc are converted into displaCy's format."""
<add> doc = get_doc(en_vocab, words=["But", "Google", "is", "starting", "from", "behind"])
<add> doc.ents = [Span(doc, 1, 2, label=doc.vocab.strings["ORG"])]
<add> ents = displacy.parse_ents(doc)
<add> assert isinstance(ents, dict)
<add> assert ents["text"] == "But Google is starting from behind "
<add> assert ents["ents"] == [{"start": 4, "end": 10, "label": "ORG"}]
<add>
<add>
<add>def test_displacy_parse_deps(en_vocab):
<add> """Test that deps and tags on a Doc are converted into displaCy's format."""
<add> words = ["This", "is", "a", "sentence"]
<add> heads = [1, 0, 1, -2]
<add> pos = ["DET", "VERB", "DET", "NOUN"]
<add> tags = ["DT", "VBZ", "DT", "NN"]
<add> deps = ["nsubj", "ROOT", "det", "attr"]
<add> doc = get_doc(en_vocab, words=words, heads=heads, pos=pos, tags=tags, deps=deps)
<add> deps = displacy.parse_deps(doc)
<add> assert isinstance(deps, dict)
<add> assert deps["words"] == [
<add> {"text": "This", "tag": "DET"},
<add> {"text": "is", "tag": "VERB"},
<add> {"text": "a", "tag": "DET"},
<add> {"text": "sentence", "tag": "NOUN"},
<add> ]
<add> assert deps["arcs"] == [
<add> {"start": 0, "end": 1, "label": "nsubj", "dir": "left"},
<add> {"start": 2, "end": 3, "label": "det", "dir": "left"},
<add> {"start": 1, "end": 3, "label": "attr", "dir": "right"},
<add> ]
<add>
<add>
<add>def test_displacy_spans(en_vocab):
<add> """Test that displaCy can render Spans."""
<add> doc = get_doc(en_vocab, words=["But", "Google", "is", "starting", "from", "behind"])
<add> doc.ents = [Span(doc, 1, 2, label=doc.vocab.strings["ORG"])]
<add> html = displacy.render(doc[1:4], style="ent")
<add> assert html.startswith("<div")
<add>
<add>
<add>def test_displacy_render_wrapper(en_vocab):
<add> """Test that displaCy accepts custom rendering wrapper."""
<add>
<add> def wrapper(html):
<add> return "TEST" + html + "TEST"
<add>
<add> displacy.set_render_wrapper(wrapper)
<add> doc = get_doc(en_vocab, words=["But", "Google", "is", "starting", "from", "behind"])
<add> doc.ents = [Span(doc, 1, 2, label=doc.vocab.strings["ORG"])]
<add> html = displacy.render(doc, style="ent")
<add> assert html.startswith("TEST<div")
<add> assert html.endswith("/div>TEST")
<add>
<add>
<add>def test_displacy_raises_for_wrong_type(en_vocab):
<add> with pytest.raises(ValueError):
<add> displacy.render("hello world")
<ide><path>spacy/tests/test_misc.py
<ide> import pytest
<ide> from pathlib import Path
<ide> from spacy import util
<del>from spacy import displacy
<ide> from spacy import prefer_gpu, require_gpu
<del>from spacy.tokens import Span
<ide> from spacy._ml import PrecomputableAffine
<ide>
<del>from .util import get_doc
<del>
<ide>
<ide> @pytest.mark.parametrize("text", ["hello/world", "hello world"])
<ide> def test_util_ensure_path_succeeds(text):
<ide> def test_util_get_package_path(package):
<ide> assert isinstance(path, Path)
<ide>
<ide>
<del>def test_displacy_parse_ents(en_vocab):
<del> """Test that named entities on a Doc are converted into displaCy's format."""
<del> doc = get_doc(en_vocab, words=["But", "Google", "is", "starting", "from", "behind"])
<del> doc.ents = [Span(doc, 1, 2, label=doc.vocab.strings["ORG"])]
<del> ents = displacy.parse_ents(doc)
<del> assert isinstance(ents, dict)
<del> assert ents["text"] == "But Google is starting from behind "
<del> assert ents["ents"] == [{"start": 4, "end": 10, "label": "ORG"}]
<del>
<del>
<del>def test_displacy_parse_deps(en_vocab):
<del> """Test that deps and tags on a Doc are converted into displaCy's format."""
<del> words = ["This", "is", "a", "sentence"]
<del> heads = [1, 0, 1, -2]
<del> pos = ["DET", "VERB", "DET", "NOUN"]
<del> tags = ["DT", "VBZ", "DT", "NN"]
<del> deps = ["nsubj", "ROOT", "det", "attr"]
<del> doc = get_doc(en_vocab, words=words, heads=heads, pos=pos, tags=tags, deps=deps)
<del> deps = displacy.parse_deps(doc)
<del> assert isinstance(deps, dict)
<del> assert deps["words"] == [
<del> {"text": "This", "tag": "DET"},
<del> {"text": "is", "tag": "VERB"},
<del> {"text": "a", "tag": "DET"},
<del> {"text": "sentence", "tag": "NOUN"},
<del> ]
<del> assert deps["arcs"] == [
<del> {"start": 0, "end": 1, "label": "nsubj", "dir": "left"},
<del> {"start": 2, "end": 3, "label": "det", "dir": "left"},
<del> {"start": 1, "end": 3, "label": "attr", "dir": "right"},
<del> ]
<del>
<del>
<del>def test_displacy_spans(en_vocab):
<del> """Test that displaCy can render Spans."""
<del> doc = get_doc(en_vocab, words=["But", "Google", "is", "starting", "from", "behind"])
<del> doc.ents = [Span(doc, 1, 2, label=doc.vocab.strings["ORG"])]
<del> html = displacy.render(doc[1:4], style="ent")
<del> assert html.startswith("<div")
<del>
<del>
<del>def test_displacy_render_wrapper(en_vocab):
<del> """Test that displaCy accepts custom rendering wrapper."""
<del>
<del> def wrapper(html):
<del> return "TEST" + html + "TEST"
<del>
<del> displacy.set_render_wrapper(wrapper)
<del> doc = get_doc(en_vocab, words=["But", "Google", "is", "starting", "from", "behind"])
<del> doc.ents = [Span(doc, 1, 2, label=doc.vocab.strings["ORG"])]
<del> html = displacy.render(doc, style="ent")
<del> assert html.startswith("TEST<div")
<del> assert html.endswith("/div>TEST")
<del>
<del>
<del>def test_displacy_raises_for_wrong_type(en_vocab):
<del> with pytest.raises(ValueError):
<del> displacy.render("hello world")
<del>
<del>
<ide> def test_PrecomputableAffine(nO=4, nI=5, nF=3, nP=2):
<ide> model = PrecomputableAffine(nO=nO, nI=nI, nF=nF, nP=nP)
<ide> assert model.W.shape == (nF, nO, nP, nI) | 2 |
Text | Text | fix names in contributing | 14961cf8dff745b3f2067df3187543fcae97ee43 | <ide><path>CONTRIBUTING.md
<ide> chance of keeping on top of things.
<ide> ## Submitting Changes
<ide>
<ide> * Push your changes to a topic branch in your fork of the repository.
<del>* Submit a pull request to the repository in the cakephp organization, with the
<add>* Submit a pull request to the repository in the CakePHP organization, with the
<ide> correct target branch.
<ide>
<ide> ## Test cases and codesniffer
<ide> driver settings as required to run tests for particular database.
<ide>
<ide> You can also register on [Travis CI](https://travis-ci.org/) and from your
<ide> [profile](https://travis-ci.org/profile) page enable the service hook for your
<del>cakephp fork on github for automated test builds.
<add>CakePHP fork on GitHub for automated test builds.
<ide>
<ide> To run the sniffs for CakePHP coding standards:
<ide> | 1 |
Javascript | Javascript | get router history working with flux | 80d36cc3cd13a24665af3203b3253b81cf37ec8e | <ide><path>client/index.js
<ide> import { hydrate } from 'thundercats';
<ide> import { render$ } from 'thundercats-react';
<ide>
<ide> import { app$ } from '../common/app';
<add>import synchroniseHistory from './synchronise-history';
<ide>
<ide> const debug = debugFactory('fcc:client');
<ide> const DOMContianer = document.getElementById('fcc');
<ide> const appLocation = createLocation(
<ide> location.pathname + location.search
<ide> );
<ide>
<del>function location$(history) {
<del> return Rx.Observable.create(function(observer) {
<del> const dispose = history.listen(function(location) {
<del> observer.onNext(location);
<del> });
<del>
<del> return Rx.Disposable.create(() => {
<del> dispose();
<del> });
<del> });
<del>}
<del>
<ide> // returns an observable
<ide> app$({ history, location: appLocation })
<ide> .flatMap(
<ide> app$({ history, location: appLocation })
<ide> ({ nextLocation, props }, appCat) => ({ nextLocation, props, appCat })
<ide> )
<ide> .doOnNext(({ appCat }) => {
<del> const appActions = appCat.getActions('appActions');
<del> const appStore = appCat.getStore('appStore');
<del>
<del> const route$ = location$(history)
<del> .pluck('pathname')
<del> .distinctUntilChanged();
<add> const { updateLocation, goTo, goBack } = appCat.getActions('appActions');
<add> const appStore$ = appCat.getStore('appStore');
<ide>
<del> appStore
<del> .pluck('route')
<del> .filter(route => !!route)
<del> .withLatestFrom(
<del> route$,
<del> (nextRoute, currentRoute) => ({ currentRoute, nextRoute })
<del> )
<del> // only continue when route change requested
<del> .filter(({ currentRoute, nextRoute }) => currentRoute !== nextRoute)
<del> .doOnNext(({ nextRoute }) => {
<del> debug('route change', nextRoute);
<del> history.pushState(history.state, nextRoute);
<del> })
<del> .subscribeOnError(err => console.error(err));
<add> const routerState$ = appStore$
<add> .map(({ location }) => location)
<add> .distinctUntilChanged(
<add> location => location && location.key ? location.key : location
<add> );
<ide>
<del> appActions.goBack.subscribe(function() {
<del> history.goBack();
<del> });
<del>
<del> appActions
<del> .updateRoute
<del> .pluck('route')
<del> .doOnNext(route => {
<del> debug('update route', route);
<del> history.pushState(history.state, route);
<del> })
<del> .subscribeOnError(err => console.error(err));
<add> synchroniseHistory(
<add> history,
<add> updateLocation,
<add> goTo,
<add> goBack,
<add> routerState$
<add> );
<ide> })
<ide> .flatMap(({ props, appCat }) => {
<ide> props.history = history;
<ide><path>client/synchronise-history.js
<add>import { Disposable, Observable } from 'rx';
<add>
<add>export function location$(history) {
<add> return Observable.create(function(observer) {
<add> const dispose = history.listen(function(location) {
<add> observer.onNext(location);
<add> });
<add>
<add> return Disposable.create(() => {
<add> dispose();
<add> });
<add> });
<add>}
<add>
<add>const emptyLocation = {
<add> pathname: '',
<add> search: '',
<add> hash: ''
<add>};
<add>
<add>let prevKey;
<add>let isSyncing = false;
<add>export default function synchroniseHistory(
<add> history,
<add> updateLocation,
<add> goTo,
<add> goBack,
<add> routerState$
<add>) {
<add> routerState$.subscribe(
<add> location => {
<add>
<add> if (!location) {
<add> return null;
<add> }
<add>
<add> // store location has changed, update history
<add> if (location.key !== prevKey) {
<add> isSyncing = true;
<add> history.transitionTo({ ...emptyLocation, ...location });
<add> isSyncing = false;
<add> }
<add> }
<add> );
<add>
<add> location$(history)
<add> .doOnNext(location => {
<add> prevKey = location.key;
<add>
<add> if (isSyncing) {
<add> return null;
<add> }
<add>
<add> return updateLocation(location);
<add> })
<add> .subscribe(() => {});
<add>
<add> goTo
<add> .doOnNext((route = '/') => {
<add> history.push(route);
<add> })
<add> .subscribe(() => {});
<add>
<add> goBack
<add> .doOnNext(() => {
<add> history.goBack();
<add> })
<add> .subscribe(() => {});
<add>}
<ide><path>common/app/flux/Actions.js
<ide> export default Actions({
<ide> });
<ide> },
<ide>
<del> updateRoute(route) {
<del> return { route };
<del> },
<del> goBack: null
<add> // routing
<add> goTo: null,
<add> goBack: null,
<add> updateLocation(location) {
<add> return {
<add> transform(state) {
<add> return { ...state, location };
<add> }
<add> };
<add> }
<ide> });
<ide><path>common/app/flux/Store.js
<ide> export default Store({
<ide> value: initValue
<ide> },
<ide> init({ instance: appStore, args: [cat] }) {
<del> const { updateRoute, getUser, setTitle } = cat.getActions('appActions');
<add> const {
<add> updateLocation,
<add> getUser,
<add> setTitle
<add> } = cat.getActions('appActions');
<add>
<ide> const register = createRegistrar(appStore);
<ide> const {
<ide> toggleQuestions,
<ide> export default Store({
<ide> } = cat.getActions('hikesActions');
<ide>
<ide> // app
<del> register(setter(fromMany(getUser, setTitle, updateRoute)));
<add> register(
<add> fromMany(
<add> setter(
<add> fromMany(
<add> getUser,
<add> setTitle
<add> )
<add> ),
<add> updateLocation
<add> )
<add> );
<ide>
<ide> // hikes
<ide> register(
<ide><path>common/app/routes/Hikes/flux/Actions.js
<ide> export default Actions({
<ide> const currentHike = findNextHike(hikes, id);
<ide>
<ide> // go to next route
<del> state.route = currentHike && currentHike.dashedName ?
<del> `/hikes/${ currentHike.dashedName }` :
<del> '/hikes';
<add> state.location = {
<add> action: 'PUSH',
<add> pathname: currentHike && currentHike.dashedName ?
<add> `/hikes/${ currentHike.dashedName }` :
<add> '/hikes'
<add> };
<ide>
<ide> const hikesApp = {
<ide> ...state.hikesApp, | 5 |
Ruby | Ruby | ignore $home/.curlrc when invoking curl | f501e7f00b7816aae703463815f5cef0e38e0086 | <ide><path>Library/Homebrew/global.rb
<ide>
<ide> HOMEBREW_USER_AGENT = "Homebrew #{HOMEBREW_VERSION} (Ruby #{RUBY_VERSION}-#{RUBY_PATCHLEVEL}; Mac OS X #{MACOS_FULL_VERSION})"
<ide>
<del>HOMEBREW_CURL_ARGS = '-f#LA'
<add>HOMEBREW_CURL_ARGS = '-qf#LA'
<ide>
<ide> RECOMMENDED_LLVM = 2326
<ide> RECOMMENDED_GCC_40 = (MACOS_VERSION >= 10.6) ? 5494 : 5493 | 1 |
Javascript | Javascript | fix scope issue in dom 2 event handlers | e0aa10a664aabbc9ab900a07ea55cb892a7b8d3f | <ide><path>src/event/event.js
<ide> jQuery.event = {
<ide> // around, causing it to be cloned in the process
<ide> if ( jQuery.browser.msie && element.setInterval != undefined )
<ide> element = window;
<del>
<add>
<ide> // if data is passed, bind to handler
<ide> if( data != undefined ) {
<ide> // Create temporary function pointer to original handler
<ide> jQuery.event = {
<ide> handler = function() {
<ide> // Pass arguments and context to original handler
<ide> return fn.apply(this, arguments);
<del> };
<add> };
<ide>
<ide> // Store data in unique handler
<del> handler.data = data;
<add> handler.data = data;
<ide>
<ide> // Set the guid of unique handler to the same of original handler, so it can be removed
<del> handler.guid = fn.guid;
<add> handler.guid = fn.guid;
<ide> }
<ide>
<ide> // Make sure that the function being executed has a unique ID
<ide> jQuery.event = {
<ide>
<ide> // Add the function to the element's handler list
<ide> handlers[handler.guid] = handler;
<add>
<add> if (!element.$handle) {
<add> element.$handle = function() {
<add> jQuery.event.handle.apply(element, arguments);
<add> };
<ide>
<del> // And bind the global event handler to the element
<del> if (element.addEventListener)
<del> element.addEventListener(type, this.handle, false);
<del> else if (element.attachEvent)
<del> element.attachEvent("on" + type, this.handle, false);
<add> // And bind the global event handler to the element
<add> if (element.addEventListener)
<add> element.addEventListener(type, element.$handle, false);
<add> else if (element.attachEvent)
<add> element.attachEvent("on" + type, element.$handle, false);
<add> }
<ide>
<ide> // Remember the function in a global list (for triggering)
<ide> if (!this.global[type])
<ide> jQuery.event = {
<ide> // remove generic event handler if no more handlers exist
<ide> for ( ret in events[type] ) break;
<ide> if ( !ret ) {
<del> ret = null;
<ide> if (element.removeEventListener)
<del> element.removeEventListener(type, this.handle, false);
<add> element.removeEventListener(type, element.$handle, false);
<ide> else if (element.detachEvent)
<del> element.detachEvent("on" + type, this.handle, false);
<add> element.detachEvent("on" + type, element.$handle, false);
<add> ret = element.$handle = null;
<ide> delete events[type];
<ide> }
<ide> } | 1 |
Javascript | Javascript | fix initialization with non routable statemanager | 640f00cacabddcbc6881ed4770fa500534ecbf70 | <ide><path>packages/ember-application/lib/system/application.js
<ide> Ember.Application = Ember.Namespace.extend(
<ide>
<ide> this.ready();
<ide>
<del> if (stateManager) {
<add> if (stateManager && stateManager instanceof Ember.Router) {
<ide> this.setupStateManager(stateManager);
<ide> }
<ide> },
<ide><path>packages/ember-application/tests/system/application_test.js
<ide> test("initialize controllers into a state manager", function() {
<ide> equal(getPath(stateManager, 'barController.target'), stateManager, "the state manager is assigned");
<ide> });
<ide>
<del>module("Ember.Application initial route", function() {
<add>test('initialized application go to initial route', function() {
<ide> Ember.run(function() {
<ide> app = Ember.Application.create({
<ide> rootElement: '#qunit-fixture'
<ide> });
<ide>
<del> app.stateManager = Ember.StateManager.create({
<add> app.stateManager = Ember.Router.create({
<ide> location: {
<ide> getURL: function() {
<ide> return '/';
<del> }
<add> },
<add> setURL: function() {},
<add> onUpdateURL: function() {}
<ide> },
<ide>
<ide> start: Ember.State.extend({
<ide> module("Ember.Application initial route", function() {
<ide>
<ide> equal(app.getPath('stateManager.currentState.path'), 'start.index', "The router moved the state into the right place");
<ide> });
<add>
<add>test("initialize application with non routable stateManager", function() {
<add> Ember.run(function() {
<add> app = Ember.Application.create({
<add> rootElement: '#qunit-fixture'
<add> });
<add>
<add> app.stateManager = Ember.StateManager.create({
<add> start: Ember.State.extend()
<add> });
<add> });
<add>
<add> equal(app.getPath('stateManager.currentState.path'), 'start', "Application sucessfuly started");
<add>}); | 2 |
Javascript | Javascript | remove `router.router` deprecation | 4842f036a330a39c57147898ca074c8a66946783 | <ide><path>packages/ember-routing/lib/system/router.js
<ide> function updatePaths(router) {
<ide> }
<ide>
<ide> EmberRouter.reopenClass({
<del> router: null,
<del>
<ide> /**
<ide> The `Router.map` function allows you to define mappings from URLs to routes
<ide> in your application. These mappings are defined within the
<ide> function representEmptyRoute(liveRoutes, defaultParentState, route) {
<ide> }
<ide> }
<ide>
<del>deprecateProperty(EmberRouter.prototype, 'router', '_routerMicrolib', {
<del> id: 'ember-router.router',
<del> until: '2.16',
<del> url: 'https://emberjs.com/deprecations/v2.x/#toc_ember-router-router-renamed-to-ember-router-_routermicrolib'
<del>});
<del>
<ide> export default EmberRouter;
<ide><path>packages/ember-routing/tests/system/router_test.js
<ide> QUnit.test('Router#triggerEvent ignores handlers that have not loaded yet', func
<ide>
<ide> triggerEvent(handlerInfos, false, ['loading']);
<ide> });
<del>
<del>QUnit.test('Router#router deprecates when called', function(assert) {
<del> assert.expect(2);
<del>
<del> let router = createRouter();
<del>
<del> expectDeprecation(function() {
<del> assert.equal(router.router, router._routerMicrolib);
<del> }, 'Usage of `router` is deprecated, use `_routerMicrolib` instead.');
<del>});
<del>
<del>QUnit.test('Router#_routerMicrolib can be used without deprecation', function(assert) {
<del> assert.expect(1);
<del>
<del> let router = createRouter();
<del>
<del> assert.ok(router._routerMicrolib, 'Router._routerMicrolib can be used without deprecation');
<del>}); | 2 |
Ruby | Ruby | provide a request and response to all controllers | 51c7ac142d31095d4c699f44cc44ddea627da1eb | <ide><path>actionpack/lib/action_controller/metal.rb
<ide> def self.controller_name
<ide> @controller_name ||= name.demodulize.sub(/Controller$/, '').underscore
<ide> end
<ide>
<add> def self.make_response!(request)
<add> ActionDispatch::Response.new.tap do |res|
<add> res.request = request
<add> end
<add> end
<add>
<ide> # Delegates to the class' <tt>controller_name</tt>
<ide> def controller_name
<ide> self.class.controller_name
<ide> def controller_name
<ide> # and response object available. You might wish to control the
<ide> # environment and response manually for performance reasons.
<ide>
<del> attr_internal :headers, :response, :request
<del> delegate :session, :to => "@_request"
<add> attr_internal :response, :request
<add> delegate :session, :headers, :to => "@_request"
<ide>
<ide> def initialize
<del> @_headers = {"Content-Type" => "text/html"}
<ide> @_status = 200
<ide> @_request = nil
<ide> @_response = nil
<ide> def params=(val)
<ide> # in Renderer and Redirector.
<ide>
<ide> def content_type=(type)
<del> headers["Content-Type"] = type.to_s
<add> response.content_type = type
<ide> end
<ide>
<ide> def content_type
<ide> def status=(status)
<ide>
<ide> def response_body=(body)
<ide> body = [body] unless body.nil? || body.respond_to?(:each)
<add> response.body = body
<ide> super
<ide> end
<ide>
<ide> def performed?
<ide> response_body || (response && response.committed?)
<ide> end
<ide>
<del> def dispatch(name, request) #:nodoc:
<add> def dispatch(name, request, response) #:nodoc:
<ide> set_request!(request)
<add> set_response!(response)
<ide> process(name)
<ide> to_a
<ide> end
<ide>
<add> def set_response!(response) # :nodoc:
<add> @_response = response
<add> end
<add>
<ide> def set_request!(request) #:nodoc:
<ide> @_request = request
<ide> @_request.controller_instance = self
<ide> class << self; deprecate :call; end
<ide> def self.action(name)
<ide> if middleware_stack.any?
<ide> middleware_stack.build(name) do |env|
<del> new.dispatch(name, ActionDispatch::Request.new(env))
<add> req = ActionDispatch::Request.new(env)
<add> res = make_response! req
<add> new.dispatch(name, req, res)
<ide> end
<ide> else
<del> lambda { |env| new.dispatch(name, ActionDispatch::Request.new(env)) }
<add> lambda { |env|
<add> req = ActionDispatch::Request.new(env)
<add> res = make_response! req
<add> new.dispatch(name, req, res)
<add> }
<ide> end
<ide> end
<ide>
<ide> # Direct dispatch to the controller. Instantiates the controller, then
<ide> # executes the action named +name+.
<del> def self.dispatch(name, req)
<add> def self.dispatch(name, req, res)
<ide> if middleware_stack.any?
<del> middleware_stack.build(name) { |env| new.dispatch(name, req) }.call req.env
<add> middleware_stack.build(name) { |env| new.dispatch(name, req, res) }.call req.env
<ide> else
<del> new.dispatch(name, req)
<add> new.dispatch(name, req, res)
<ide> end
<ide> end
<ide> end
<ide><path>actionpack/lib/action_controller/metal/head.rb
<ide> def head(status, options = {})
<ide> headers[key.to_s.dasherize.split('-').each { |v| v[0] = v[0].chr.upcase }.join('-')] = value.to_s
<ide> end
<ide>
<add> response.status = Rack::Utils.status_code(status)
<add>
<ide> self.status = status
<ide> self.location = url_for(location) if location
<ide>
<ide> def head(status, options = {})
<ide> if include_content?(self.response_code)
<ide> self.content_type = content_type || (Mime[formats.first] if formats)
<ide> self.response.charset = false if self.response
<del> else
<del> headers.delete('Content-Type')
<del> headers.delete('Content-Length')
<ide> end
<ide>
<ide> true
<ide><path>actionpack/lib/action_controller/metal/rack_delegation.rb
<ide> module ClassMethods
<ide> def build_with_env(env = {}) #:nodoc:
<ide> new.tap { |c| c.set_request! ActionDispatch::Request.new(env) }
<ide> end
<del>
<del> def make_response!(request)
<del> ActionDispatch::Response.new.tap do |res|
<del> res.request = request
<del> end
<del> end
<del> end
<del>
<del> def set_request!(request) #:nodoc:
<del> super
<del> set_response!(request)
<ide> end
<ide>
<ide> def response_body=(body)
<ide> def response_body=(body)
<ide> def reset_session
<ide> @_request.reset_session
<ide> end
<del>
<del> private
<del>
<del> def set_response!(request)
<del> @_response = self.class.make_response! request
<del> end
<ide> end
<ide> end
<ide><path>actionpack/lib/action_dispatch/http/response.rb
<ide> class Response
<ide> CONTENT_TYPE = "Content-Type".freeze
<ide> SET_COOKIE = "Set-Cookie".freeze
<ide> LOCATION = "Location".freeze
<del> NO_CONTENT_CODES = [204, 304]
<add> NO_CONTENT_CODES = [100, 101, 102, 204, 205, 304]
<ide>
<ide> cattr_accessor(:default_charset) { "utf-8" }
<ide> cattr_accessor(:default_headers)
<ide> def rack_response(status, header)
<ide>
<ide> if NO_CONTENT_CODES.include?(@status)
<ide> header.delete CONTENT_TYPE
<add> header.delete 'Content-Length'
<ide> [status, header, []]
<ide> else
<ide> [status, header, RackBody.new(self)]
<ide><path>actionpack/lib/action_dispatch/routing/route_set.rb
<ide> def initialize(raise_on_name_error)
<ide> def dispatcher?; true; end
<ide>
<ide> def serve(req)
<del> params = req.path_parameters
<del> dispatch(controller(req), params[:action], req)
<add> params = req.path_parameters
<add> controller = controller req
<add> res = controller.make_response! req
<add> dispatch(controller, params[:action], req, res)
<ide> rescue NameError => e
<ide> if @raise_on_name_error
<ide> raise ActionController::RoutingError, e.message, e.backtrace
<ide> def controller(req)
<ide> req.controller_class
<ide> end
<ide>
<del> def dispatch(controller, action, req)
<del> controller.dispatch(action, req)
<add> def dispatch(controller, action, req, res)
<add> controller.dispatch(action, req, res)
<ide> end
<ide> end
<ide>
<ide><path>actionpack/test/abstract_unit.rb
<ide> def self.build_app(routes = nil)
<ide> class DeadEndRoutes < ActionDispatch::Routing::RouteSet
<ide> # Stub Rails dispatcher so it does not get controller references and
<ide> # simply return the controller#action as Rack::Body.
<del> class NullController
<add> class NullController < ::ActionController::Metal
<ide> def initialize(controller_name)
<ide> @controller = controller_name
<ide> end
<ide>
<del> def dispatch(action, req)
<add> def make_response!(request)
<add> self.class.make_response! request
<add> end
<add>
<add> def dispatch(action, req, res)
<ide> [200, {'Content-Type' => 'text/html'}, ["#{@controller}##{action}"]]
<ide> end
<ide> end
<ide><path>actionpack/test/controller/base_test.rb
<ide> def test_no_deprecation_when_action_view_record_identifier_is_included
<ide> class ControllerInstanceTests < ActiveSupport::TestCase
<ide> def setup
<ide> @empty = EmptyController.new
<add> @empty.set_request!(ActionDispatch::Request.new({}))
<add> @empty.set_response!(EmptyController.make_response!(@empty.request))
<ide> @contained = Submodule::ContainedEmptyController.new
<ide> @empty_controllers = [@empty, @contained]
<ide> end
<ide><path>actionpack/test/controller/new_base/bare_metal_test.rb
<ide> class BareTest < ActiveSupport::TestCase
<ide>
<ide> test "response_body value is wrapped in an array when the value is a String" do
<ide> controller = BareController.new
<add> controller.set_request!(ActionDispatch::Request.new({}))
<add> controller.set_response!(BareController.make_response!(controller.request))
<ide> controller.index
<ide> assert_equal ["Hello world"], controller.response_body
<ide> end
<ide><path>actionpack/test/controller/new_base/render_html_test.rb
<ide> class RenderHtmlTest < Rack::TestCase
<ide>
<ide> test "rendering from minimal controller returns response with text/html content type" do
<ide> get "/render_html/minimal/index"
<del> assert_content_type "text/html"
<add> assert_content_type "text/html; charset=utf-8"
<ide> end
<ide>
<ide> test "rendering from normal controller returns response with text/html content type" do
<ide><path>actionpack/test/controller/new_base/render_plain_test.rb
<ide> class RenderPlainTest < Rack::TestCase
<ide>
<ide> test "rendering from minimal controller returns response with text/plain content type" do
<ide> get "/render_plain/minimal/index"
<del> assert_content_type "text/plain"
<add> assert_content_type "text/plain; charset=utf-8"
<ide> end
<ide>
<ide> test "rendering from normal controller returns response with text/plain content type" do | 10 |
Python | Python | fix progbar dynamic display | 7fc5466f95e2f9e19c8ab7602286f642a136091b | <ide><path>keras/utils/generic_utils.py
<ide> def __init__(self, target, width=30, verbose=1, interval=0.05):
<ide> self.interval = interval
<ide> self.total_width = 0
<ide> self.seen_so_far = 0
<del> self.verbose = verbose
<del> self.is_jupyter = 'ipykernel' in sys.modules
<add> self.verbose = verbose
<add> self._dynamic_display = (sys.stdout.isatty() or
<add> 'ipykernel' in sys.modules)
<ide>
<ide> def update(self, current, values=None, force=False):
<ide> """Updates the progress bar.
<ide> def update(self, current, values=None, force=False):
<ide> return
<ide>
<ide> prev_total_width = self.total_width
<del> if sys.stdout.isatty() or self.is_jupyter:
<add> if self._dynamic_display:
<ide> sys.stdout.write('\b' * prev_total_width)
<ide> sys.stdout.write('\r')
<ide> else:
<ide> def update(self, current, values=None, force=False):
<ide> for k in self.unique_values:
<ide> info += ' - %s:' % k
<ide> if isinstance(self.sum_values[k], list):
<del> avg = np.mean(self.sum_values[k][0] / max(1, self.sum_values[k][1]))
<add> avg = np.mean(
<add> self.sum_values[k][0] / max(1, self.sum_values[k][1]))
<ide> if abs(avg) > 1e-3:
<ide> info += ' %.4f' % avg
<ide> else:
<ide> def update(self, current, values=None, force=False):
<ide> if self.target is None or current >= self.target:
<ide> for k in self.unique_values:
<ide> info += ' - %s:' % k
<del> avg = np.mean(self.sum_values[k][0] / max(1, self.sum_values[k][1]))
<add> avg = np.mean(
<add> self.sum_values[k][0] / max(1, self.sum_values[k][1]))
<ide> if avg > 1e-3:
<ide> info += ' %.4f' % avg
<ide> else: | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.