content_type stringclasses 8 values | main_lang stringclasses 7 values | message stringlengths 1 50 | sha stringlengths 40 40 | patch stringlengths 52 962k | file_count int64 1 300 |
|---|---|---|---|---|---|
Go | Go | add test case for network restore | 7d0689a8d35e4dd7d569e552957a20ba287c95e6 | <ide><path>integration-cli/docker_cli_network_unix_test.go
<ide> func (s *DockerNetworkSuite) TestDockerNetworkCreateDeleteSpecialCharacters(c *c
<ide> dockerCmd(c, "network", "rm", "kiwl$%^")
<ide> assertNwNotAvailable(c, "kiwl$%^")
<ide> }
<add>
<add>func (s *DockerDaemonSuite) TestDaemonRestartRestoreBridgeNetwork(t *check.C) {
<add> testRequires(t, DaemonIsLinux)
<add> if err := s.d.StartWithBusybox("--live-restore"); err != nil {
<add> t.Fatal(err)
<add> }
<add> defer s.d.Stop()
<add> oldCon := "old"
<add>
<add> _, err := s.d.Cmd("run", "-d", "--name", oldCon, "-p", "80:80", "busybox", "top")
<add> if err != nil {
<add> t.Fatal(err)
<add> }
<add> oldContainerIP, err := s.d.Cmd("inspect", "-f", "{{ .NetworkSettings.Networks.bridge.IPAddress }}", oldCon)
<add> if err != nil {
<add> t.Fatal(err)
<add> }
<add> // Kill the daemon
<add> if err := s.d.Kill(); err != nil {
<add> t.Fatal(err)
<add> }
<add>
<add> // restart the daemon
<add> if err := s.d.Start("--live-restore"); err != nil {
<add> t.Fatal(err)
<add> }
<add>
<add> // start a new container, the new container's ip should not be the same with
<add> // old running container.
<add> newCon := "new"
<add> _, err = s.d.Cmd("run", "-d", "--name", newCon, "busybox", "top")
<add> if err != nil {
<add> t.Fatal(err)
<add> }
<add> newContainerIP, err := s.d.Cmd("inspect", "-f", "{{ .NetworkSettings.Networks.bridge.IPAddress }}", newCon)
<add> if err != nil {
<add> t.Fatal(err)
<add> }
<add> if strings.Compare(strings.TrimSpace(oldContainerIP), strings.TrimSpace(newContainerIP)) == 0 {
<add> t.Fatalf("new container ip should not equal to old running container ip")
<add> }
<add>
<add> // start a new container, the new container should ping old running container
<add> _, err = s.d.Cmd("run", "-t", "busybox", "ping", "-c", "1", oldContainerIP)
<add> if err != nil {
<add> t.Fatal(err)
<add> }
<add>
<add> // start a new container try to publist port 80:80 will failed
<add> out, err := s.d.Cmd("run", "-p", "80:80", "-d", "busybox", "top")
<add> if err == nil || !strings.Contains(out, "Bind for 0.0.0.0:80 failed: port is already allocated") {
<add> t.Fatalf("80 port is allocated to old running container, it should failed on allocating to new container")
<add> }
<add>
<add> // kill old running container and try to allocate again
<add> _, err = s.d.Cmd("kill", oldCon)
<add> if err != nil {
<add> t.Fatal(err)
<add> }
<add> _, err = s.d.Cmd("run", "-p", "80:80", "-d", "busybox", "top")
<add> if err != nil {
<add> t.Fatal(err)
<add> }
<add>} | 1 |
Javascript | Javascript | transfer everything from element onto the fiber | aa14d89c7da229e1bf162414bff87fd020d7218b | <ide><path>src/renderers/shared/fiber/ReactFiber.js
<ide> export type Fiber = {
<ide> child: ?Fiber,
<ide> sibling: ?Fiber,
<ide>
<del> // Input is the data coming into process this fiber. Arguments.
<add> // Unique identifier of this child.
<add> key: ?string,
<add>
<add> // The function/class/module associated with this fiber.
<add> type: any,
<add>
<add> // The ref last used to attach this node.
<add> // I'll avoid adding an owner field for prod and model that as functions.
<add> ref: null | (handle : ?Object) => void,
<add>
<add> // Input is the data coming into process this fiber. Arguments. Props.
<ide> input: any, // This type will be more specific once we overload the tag.
<ide> // Output is the return value of this fiber, or a linked list of return values
<ide> // if this returns multiple values. Such as a fragment.
<ide> export type Fiber = {
<ide>
<ide> };
<ide>
<del>var createFiber = function(tag : number) : Fiber {
<add>var createFiber = function(tag : number, key : null | string) : Fiber {
<ide> return {
<ide>
<ide> tag: tag,
<ide> var createFiber = function(tag : number) : Fiber {
<ide> child: null,
<ide> sibling: null,
<ide>
<add> key: key,
<add> type: null,
<add> ref: null,
<add>
<ide> input: null,
<ide> output: null,
<ide>
<ide> function shouldConstruct(Component) {
<ide> }
<ide>
<ide> exports.createFiberFromElement = function(element : ReactElement) {
<del> const fiber = exports.createFiberFromElementType(element.type);
<del> if (typeof element.type === 'object') {
<del> // Hacky McHack
<del> element = ReactElement(fiber.input, null, element.ref, null, null, null, element.props);
<del> }
<del> fiber.input = element;
<add> const fiber = exports.createFiberFromElementType(element.type, element.key);
<add> fiber.input = element.props;
<ide> return fiber;
<ide> };
<ide>
<del>exports.createFiberFromElementType = function(type : mixed) {
<add>exports.createFiberFromElementType = function(type : mixed, key : null | string) {
<ide> let fiber;
<ide> if (typeof type === 'function') {
<ide> fiber = shouldConstruct(type) ?
<del> createFiber(ClassComponent) :
<del> createFiber(IndeterminateComponent);
<add> createFiber(ClassComponent, key) :
<add> createFiber(IndeterminateComponent, key);
<add> fiber.type = type;
<ide> } else if (typeof type === 'string') {
<del> fiber = createFiber(HostComponent);
<add> fiber = createFiber(HostComponent, key);
<add> fiber.type = type;
<ide> } else if (typeof type === 'object' && type !== null) {
<ide> // Currently assumed to be a continuation and therefore is a fiber already.
<ide> fiber = type;
<ide> exports.createFiberFromElementType = function(type : mixed) {
<ide> };
<ide>
<ide> exports.createFiberFromCoroutine = function(coroutine : ReactCoroutine) {
<del> const fiber = createFiber(CoroutineComponent);
<add> const fiber = createFiber(CoroutineComponent, coroutine.key);
<add> fiber.type = coroutine.handler;
<ide> fiber.input = coroutine;
<ide> return fiber;
<ide> };
<ide>
<ide> exports.createFiberFromYield = function(yieldNode : ReactYield) {
<del> const fiber = createFiber(YieldComponent);
<add> const fiber = createFiber(YieldComponent, yieldNode.key);
<ide> return fiber;
<ide> };
<ide><path>src/renderers/shared/fiber/ReactFiberBeginWork.js
<ide> var {
<ide> YieldComponent,
<ide> } = ReactTypesOfWork;
<ide>
<del>function getElement(unitOfWork) : ReactElement {
<del> var element = unitOfWork.input;
<del> if (!element) {
<del> throw new Error('Should be resolved by now');
<del> }
<del> return (element : ReactElement);
<del>}
<del>
<ide> function updateFunctionalComponent(unitOfWork) {
<del> var element = getElement(unitOfWork);
<del> var fn = element.type;
<del> var props = element.props;
<add> var fn = unitOfWork.type;
<add> var props = unitOfWork.input;
<ide> console.log('perform work on:', fn.name);
<ide> var nextChildren = fn(props);
<ide>
<ide> function updateFunctionalComponent(unitOfWork) {
<ide> }
<ide>
<ide> function updateHostComponent(unitOfWork) {
<del> var element = getElement(unitOfWork);
<del> console.log('host component', element.type, typeof element.props.children === 'string' ? element.props.children : '');
<add> console.log('host component', unitOfWork.type, typeof unitOfWork.input.children === 'string' ? unitOfWork.input.children : '');
<ide>
<del> var nextChildren = element.props.children;
<add> var nextChildren = unitOfWork.input.children;
<ide> unitOfWork.child = ReactChildFiber.reconcileChildFibers(
<ide> unitOfWork,
<ide> unitOfWork.child,
<ide> function updateHostComponent(unitOfWork) {
<ide> }
<ide>
<ide> function mountIndeterminateComponent(unitOfWork) {
<del> var element = getElement(unitOfWork);
<del> var fn = element.type;
<del> var props = element.props;
<add> var fn = unitOfWork.type;
<add> var props = unitOfWork.input;
<ide> var value = fn(props);
<ide> if (typeof value === 'object' && value && typeof value.render === 'function') {
<ide> console.log('performed work on class:', fn.name);
<ide> function updateCoroutineComponent(unitOfWork) {
<ide> if (!coroutine) {
<ide> throw new Error('Should be resolved by now');
<ide> }
<del> console.log('begin coroutine', coroutine.handler.name);
<add> console.log('begin coroutine', unitOfWork.type.name);
<ide> unitOfWork.child = ReactChildFiber.reconcileChildFibers(
<ide> unitOfWork,
<ide> unitOfWork.child,
<ide><path>src/renderers/shared/fiber/ReactFiberCompleteWork.js
<ide> function handleCoroutine(unitOfWork : Fiber) {
<ide> exports.completeWork = function(unitOfWork : Fiber) : ?Fiber {
<ide> switch (unitOfWork.tag) {
<ide> case FunctionalComponent:
<del> console.log('/functional component', unitOfWork.input.type.name);
<add> console.log('/functional component', unitOfWork.type.name);
<ide> transferOutput(unitOfWork.child, unitOfWork);
<ide> break;
<ide> case ClassComponent:
<del> console.log('/class component', unitOfWork.input.type.name);
<add> console.log('/class component', unitOfWork.type.name);
<ide> transferOutput(unitOfWork.child, unitOfWork);
<ide> break;
<ide> case HostComponent:
<del> console.log('/host component', unitOfWork.input.type);
<add> console.log('/host component', unitOfWork.type);
<ide> break;
<ide> case CoroutineComponent:
<ide> console.log('/coroutine component', unitOfWork.input.handler.name);
<ide><path>src/renderers/shared/fiber/ReactReifiedYield.js
<ide> var ReactFiber = require('ReactFiber');
<ide> export type ReifiedYield = { continuation: Fiber, props: Object };
<ide>
<ide> exports.createReifiedYield = function(yieldNode : ReactYield) : ReifiedYield {
<del> var fiber = ReactFiber.createFiberFromElementType(yieldNode.continuation);
<del> // Hacky way to store the continuation
<del> fiber.input = yieldNode.continuation;
<add> var fiber = ReactFiber.createFiberFromElementType(
<add> yieldNode.continuation,
<add> yieldNode.key
<add> );
<ide> return {
<ide> continuation: fiber,
<ide> props: yieldNode.props,
<ide><path>src/renderers/shared/fiber/isomorphic/ReactCoroutine.js
<ide> type CoroutineHandler<T> = (props: T, yields: Array<ReifiedYield>) => ReactNodeL
<ide>
<ide> export type ReactCoroutine = {
<ide> $$typeof: Symbol | number,
<del> key: ?string,
<add> key: null | string,
<ide> children: any,
<ide> // This should be a more specific CoroutineHandler
<ide> handler: (props: any, yields: Array<ReifiedYield>) => ReactNodeList,
<ide> props: mixed,
<ide> };
<ide> export type ReactYield = {
<ide> $$typeof: Symbol | number,
<del> key: ?string,
<add> key: null | string,
<ide> props: Object,
<ide> continuation: mixed
<ide> }; | 5 |
Ruby | Ruby | avoid double wait in eventedfileupdatecheckertest | 86c732a606209c05b3cd425b4d4ae3765ac0852d | <ide><path>activesupport/test/evented_file_update_checker_test.rb
<ide> def wait
<ide> sleep 1
<ide> end
<ide>
<add> def mkdir(dirs)
<add> super
<add> wait # wait for the events to fire
<add> end
<add>
<ide> def touch(files)
<ide> super
<ide> wait # wait for the events to fire
<ide> end
<ide>
<add> def rm_f(files)
<add> super
<add> wait # wait for the events to fire
<add> end
<add>
<ide> test "notifies forked processes" do
<ide> skip "Forking not available" unless Process.respond_to?(:fork)
<ide>
<ide> def touch(files)
<ide>
<ide> assert_not_predicate checker, :updated?
<ide>
<del> FileUtils.touch(File.join(actual_dir, "a.rb"))
<del> wait
<add> touch(File.join(actual_dir, "a.rb"))
<ide>
<ide> assert_predicate checker, :updated?
<ide> assert checker.execute_if_updated
<ide> def touch(files)
<ide>
<ide> checker = new_checker([], watched_dir => ".rb", not_exist_watched_dir => ".rb") { }
<ide>
<del> FileUtils.touch(File.join(watched_dir, "a.rb"))
<del> wait
<add> touch(File.join(watched_dir, "a.rb"))
<ide> assert_predicate checker, :updated?
<ide> assert checker.execute_if_updated
<ide>
<ide> def touch(files)
<ide> assert_predicate checker, :updated?
<ide> assert checker.execute_if_updated
<ide>
<del> FileUtils.touch(File.join(unwatched_dir, "a.rb"))
<del> wait
<add> touch(File.join(unwatched_dir, "a.rb"))
<ide> assert_not_predicate checker, :updated?
<ide> assert_not checker.execute_if_updated
<ide> end
<ide><path>activesupport/test/file_update_checker_shared_tests.rb
<ide> module FileUpdateCheckerSharedTests
<ide> def self.included(kls)
<ide> kls.class_eval do
<ide> extend ActiveSupport::Testing::Declarative
<del> include FileUtils
<ide>
<ide> def tmpdir
<ide> @tmpdir
<ide> def run(*args)
<ide> checker = new_checker(tmpfiles) { i += 1 }
<ide>
<ide> touch(tmpfiles)
<del> wait
<ide>
<ide> assert checker.execute_if_updated
<ide> assert_equal 1, i
<ide> def run(*args)
<ide> checker = new_checker(tmpfiles) { i += 1 }
<ide>
<ide> touch(tmpfiles)
<del> wait
<ide>
<ide> assert checker.execute_if_updated
<ide> assert_equal 1, i
<ide> def run(*args)
<ide> checker = new_checker(tmpfiles) { i += 1 }
<ide>
<ide> rm_f(tmpfiles)
<del> wait
<ide>
<ide> assert checker.execute_if_updated
<ide> assert_equal 1, i
<ide> def run(*args)
<ide> assert_not_predicate checker, :updated?
<ide>
<ide> touch(tmpfiles)
<del> wait
<ide>
<ide> assert_predicate checker, :updated?
<ide> end
<ide> def run(*args)
<ide> assert_not_predicate checker, :updated?
<ide>
<ide> touch(tmpfiles)
<del> wait
<ide>
<ide> assert_predicate checker, :updated?
<ide> end
<ide> def run(*args)
<ide> assert_not_predicate checker, :updated?
<ide>
<ide> rm_f(tmpfiles)
<del> wait
<ide>
<ide> assert_predicate checker, :updated?
<ide> end
<ide> def run(*args)
<ide> checker = new_checker(tmpfiles) { i += 1 }
<ide>
<ide> touch(tmpfiles[1..-1])
<del> wait
<ide>
<ide> assert checker.execute_if_updated
<ide> assert_equal 1, i
<ide> def run(*args)
<ide> checker = new_checker(tmpfiles) { i += 1 }
<ide>
<ide> touch(tmpfiles[1..-1])
<del> wait
<ide>
<ide> assert checker.execute_if_updated
<ide> assert_equal 1, i
<ide> def run(*args)
<ide> assert_not_predicate checker, :updated?
<ide>
<ide> touch(tmpfiles)
<del> wait
<ide>
<ide> assert_predicate checker, :updated?
<ide> checker.execute
<ide> def run(*args)
<ide> checker = new_checker([], tmpdir => :rb) { i += 1 }
<ide>
<ide> touch(tmpfile("foo.rb"))
<del> wait
<ide>
<ide> assert checker.execute_if_updated
<ide> assert_equal 1, i
<ide> def run(*args)
<ide> checker = new_checker([], tmpdir => []) { i += 1 }
<ide>
<ide> touch(tmpfile("foo.rb"))
<del> wait
<ide>
<ide> assert checker.execute_if_updated
<ide> assert_equal 1, i
<ide> def run(*args)
<ide> checker = new_checker([], tmpdir => [:rb, :txt]) { i += 1 }
<ide>
<ide> touch(tmpfile("foo.rb"))
<del> wait
<ide>
<ide> assert checker.execute_if_updated
<ide> assert_equal 1, i
<ide>
<ide> touch(tmpfile("foo.txt"))
<del> wait
<ide>
<ide> assert checker.execute_if_updated
<ide> assert_equal 2, i
<ide> def run(*args)
<ide> checker = new_checker([], tmpdir => :txt) { i += 1 }
<ide>
<ide> touch(tmpfile("foo.rb"))
<del> wait
<ide>
<ide> assert_not checker.execute_if_updated
<ide> assert_equal 0, i
<ide> def run(*args)
<ide> checker = new_checker([non_existing]) { i += 1 }
<ide>
<ide> touch(non_existing)
<del> wait
<ide>
<ide> assert checker.execute_if_updated
<ide> assert_equal 1, i
<ide> def run(*args)
<ide>
<ide> subdir = tmpfile("subdir")
<ide> mkdir(subdir)
<del> wait
<ide>
<ide> assert_not checker.execute_if_updated
<ide> assert_equal 0, i
<ide>
<ide> touch(File.join(subdir, "nested.rb"))
<del> wait
<ide>
<ide> assert checker.execute_if_updated
<ide> assert_equal 1, i
<ide> def run(*args)
<ide> i = 0
<ide>
<ide> subdir = tmpfile("subdir")
<del> mkdir(subdir)
<add> FileUtils.mkdir(subdir)
<ide>
<ide> checker = new_checker([], tmpdir => :rb, subdir => :txt) { i += 1 }
<ide>
<ide> touch(tmpfile("new.txt"))
<del> wait
<ide>
<ide> assert_not checker.execute_if_updated
<ide> assert_equal 0, i
<ide>
<ide> # subdir does not look for Ruby files, but its parent tmpdir does.
<ide> touch(File.join(subdir, "nested.rb"))
<del> wait
<ide>
<ide> assert checker.execute_if_updated
<ide> assert_equal 1, i
<ide>
<ide> touch(File.join(subdir, "nested.txt"))
<del> wait
<ide>
<ide> assert checker.execute_if_updated
<ide> assert_equal 2, i
<ide> def run(*args)
<ide> end
<ide> end
<ide> end
<add>
<add> private
<add> def mkdir(dirs)
<add> FileUtils.mkdir(dirs)
<add> end
<add>
<add> def touch(files)
<add> FileUtils.touch(files)
<add> end
<add>
<add> def rm_f(files)
<add> FileUtils.rm_f(files)
<add> end
<ide> end
<ide><path>activesupport/test/file_update_checker_test.rb
<ide> def new_checker(files = [], dirs = {}, &block)
<ide> ActiveSupport::FileUpdateChecker.new(files, dirs, &block)
<ide> end
<ide>
<del> def wait
<del> # noop
<del> end
<del>
<ide> def touch(files)
<ide> sleep 1 # let's wait a bit to ensure there's a new mtime
<ide> super | 3 |
PHP | PHP | normalize controllerinterface on responseinterface | 4978c493795fabd85e79a3b79823efa015625320 | <ide><path>src/Controller/Controller.php
<ide> public function implementedEvents(): array
<ide> * - Calls the controller `beforeFilter`.
<ide> * - triggers Component `startup` methods.
<ide> *
<del> * @return \Cake\Http\Response|null
<add> * @return \Psr\Http\Message\ResponseInterface|null
<ide> */
<del> public function startupProcess(): ?Response
<add> public function startupProcess(): ?ResponseInterface
<ide> {
<ide> $event = $this->dispatchEvent('Controller.initialize');
<del> if ($event->getResult() instanceof Response) {
<add> if ($event->getResult() instanceof ResponseInterface) {
<ide> return $event->getResult();
<ide> }
<ide> $event = $this->dispatchEvent('Controller.startup');
<del> if ($event->getResult() instanceof Response) {
<add> if ($event->getResult() instanceof ResponseInterface) {
<ide> return $event->getResult();
<ide> }
<ide>
<ide> public function startupProcess(): ?Response
<ide> * - triggers the component `shutdown` callback.
<ide> * - calls the Controller's `afterFilter` method.
<ide> *
<del> * @return \Cake\Http\Response|null
<add> * @return \Psr\Http\Message\ResponseInterface|null
<ide> */
<del> public function shutdownProcess(): ?Response
<add> public function shutdownProcess(): ?ResponseInterface
<ide> {
<ide> $event = $this->dispatchEvent('Controller.shutdown');
<del> if ($event->getResult() instanceof Response) {
<add> if ($event->getResult() instanceof ResponseInterface) {
<ide> return $event->getResult();
<ide> }
<ide>
<ide> public function redirect($url, int $status = 302): ?Response
<ide> }
<ide>
<ide> $event = $this->dispatchEvent('Controller.beforeRedirect', [$url, $this->response]);
<del> if ($event->getResult() instanceof Response) {
<add> if ($event->getResult() instanceof ResponseInterface) {
<ide> return $this->response = $event->getResult();
<ide> }
<ide> if ($event->isStopped()) {
<ide><path>src/Http/ControllerInterface.php
<ide> interface ControllerInterface
<ide> * - Calls the controller `beforeFilter`.
<ide> * - triggers Component `startup` methods.
<ide> *
<del> * @return \Cake\Http\Response|null
<add> * @return \Psr\Http\Message\ResponseInterface|null
<ide> */
<del> public function startupProcess(): ?Response;
<add> public function startupProcess(): ?ResponseInterface;
<ide>
<ide> /**
<ide> * Perform the various shutdown processes for this controller.
<ide> public function startupProcess(): ?Response;
<ide> * - triggers the component `shutdown` callback.
<ide> * - calls the Controller's `afterFilter` method.
<ide> *
<del> * @return \Cake\Http\Response|null
<add> * @return \Psr\Http\Message\ResponseInterface|null
<ide> */
<del> public function shutdownProcess(): ?Response;
<add> public function shutdownProcess(): ?ResponseInterface;
<ide>
<ide> /**
<ide> * Dispatches the controller action. Checks that the action
<ide><path>tests/test_app/TestApp/Controller/CakesController.php
<ide> namespace TestApp\Controller;
<ide>
<ide> use Cake\Controller\Controller;
<del>use Cake\Http\Response;
<add>use Psr\Http\Message\ResponseInterface;
<ide>
<ide> /**
<ide> * CakesController class
<ide> public function invalid()
<ide> /**
<ide> * Startup process
<ide> *
<del> * \Cake\Http\Response|null
<add> * \Psr\Http\Message\ResponseInterface|null
<ide> */
<del> public function startupProcess(): ?Response
<add> public function startupProcess(): ?ResponseInterface
<ide> {
<ide> parent::startupProcess();
<ide> if ($this->request->getParam('stop') === 'startup') {
<ide> public function startupProcess(): ?Response
<ide> /**
<ide> * Shutdown process
<ide> *
<del> * \Cake\Http\Response|null
<add> * \Psr\Http\Message\ResponseInterface|null
<ide> */
<del> public function shutdownProcess(): ?Response
<add> public function shutdownProcess(): ?ResponseInterface
<ide> {
<ide> parent::shutdownProcess();
<ide> if ($this->request->getParam('stop') === 'shutdown') { | 3 |
Ruby | Ruby | deprecate the eventedredis subscription adapter | 3721b859b605233ae577453532e71b0a969e8116 | <ide><path>actioncable/lib/action_cable/subscription_adapter/evented_redis.rb
<ide> class EventedRedis < Base # :nodoc:
<ide> cattr_accessor(:redis_connector) { ->(config) { ::Redis.new(url: config[:url]) } }
<ide>
<ide> def initialize(*)
<add> ActiveSupport::Deprecation.warn(<<-MSG.squish)
<add> The "evented_redis" subscription adapter is deprecated and
<add> will be removed in Rails 5.2. Please use the "redis" adapter
<add> instead.
<add> MSG
<add>
<ide> super
<ide> @redis_connection_for_broadcasts = @redis_connection_for_subscriptions = nil
<ide> end
<ide><path>actioncable/test/subscription_adapter/evented_redis_test.rb
<ide> class EventedRedisAdapterTest < ActionCable::TestCase
<ide> include ChannelPrefixTest
<ide>
<ide> def setup
<del> super
<add> assert_deprecated do
<add> super
<add> end
<ide>
<ide> # em-hiredis is warning-rich
<ide> @previous_verbose, $VERBOSE = $VERBOSE, nil | 2 |
Ruby | Ruby | treat any limit > 4 as bigint | 290e1e2fc53d80165cc876491ec0cbe18be376cf | <ide><path>activerecord/lib/active_record/connection_adapters/mysql_adapter.rb
<ide> def extract_limit(sql_type)
<ide> else
<ide> super # we could return 65535 here, but we leave it undecorated by default
<ide> end
<del> when /^int/i; 4
<ide> when /^bigint/i; 8
<del> when /^smallint/i; 2
<add> when /^int/i; 4
<ide> when /^mediumint/i; 3
<add> when /^smallint/i; 2
<add> when /^tinyint/i; 1
<ide> else
<ide> super
<ide> end
<ide> def type_to_sql(type, limit = nil, precision = nil, scale = nil)
<ide> return super unless type.to_s == 'integer'
<ide>
<ide> case limit
<del> when 1..2; 'smallint'
<del> when 3; 'mediumint'
<del> when 4, nil; 'int(11)'
<del> when 5..8; 'bigint'
<add> when 1; 'tinyint'
<add> when 2; 'smallint'
<add> when 3; 'mediumint'
<add> when 4, nil; 'int(11)'
<add> else; 'bigint'
<ide> end
<ide> end
<ide> | 1 |
Ruby | Ruby | move session settings closer together | b50a05cac32f7e1f70ea622e2c82972c87391dfa | <ide><path>railties/environments/environment.rb
<ide> # (by default production uses :info, the others :debug)
<ide> # config.log_level = :debug
<ide>
<add> # Your secret key for verifying cookie session data integrity.
<add> # If you change this key, all old sessions will become invalid!
<add> config.action_controller.session = {
<add> :session_key => '_<%= app_name %>_session',
<add> :secret => '<%= CGI::Session.generate_unique_id(app_name) %>'
<add> }
<add>
<ide> # Use the database for sessions instead of the file system
<ide> # (create the session table with 'rake db:sessions:create')
<ide> # config.action_controller.session_store = :active_record_store
<ide> # Make Active Record use UTC-base instead of local time
<ide> # config.active_record.default_timezone = :utc
<ide>
<del> # Your secret key for verifying cookie session data integrity.
<del> # If you change this key, all old sessions will become invalid!
<del> config.action_controller.session = {
<del> :session_key => '_<%= app_name %>_session',
<del> :secret => '<%= CGI::Session.generate_unique_id(app_name) %>'
<del> }
<del>
<ide> # See Rails::Configuration for more options
<ide> end
<ide> | 1 |
Python | Python | allow dynamic shape for repeat_elements | 98238720ee0f0ed9b9d21c6f9f234206d9e90b9e | <ide><path>keras/backend/tensorflow_backend.py
<ide> def repeat_elements(x, rep, axis):
<ide> rep: Python integer, number of times to repeat.
<ide> axis: Axis along which to repeat.
<ide>
<del> # Raises
<del> ValueError: In case `x.shape[axis]` is undefined.
<del>
<ide> # Returns
<ide> A tensor.
<ide> """
<ide> x_shape = x.get_shape().as_list()
<del> if x_shape[axis] is None:
<del> raise ValueError('Axis ' + str(axis) + ' of input tensor '
<del> 'should have a defined dimension, but is None. '
<del> 'Full tensor shape: ' + str(tuple(x_shape)) + '. '
<del> 'Typically you need to pass a fully-defined '
<del> '`input_shape` argument to your first layer.')
<del> # slices along the repeat axis
<del> splits = tf.split(value=x, num_or_size_splits=x_shape[axis], axis=axis)
<del> # repeat each slice the given number of reps
<del> x_rep = [s for s in splits for _ in range(rep)]
<del> return concatenate(x_rep, axis)
<add> # For static axis
<add> if x_shape[axis] is not None:
<add> # slices along the repeat axis
<add> splits = tf.split(value=x, num_or_size_splits=x_shape[axis], axis=axis)
<add> # repeat each slice the given number of reps
<add> x_rep = [s for s in splits for _ in range(rep)]
<add> return concatenate(x_rep, axis)
<add>
<add> # Here we use tf.tile to mimic behaviour of np.repeat so that
<add> # we can handle dynamic shapes (that include None).
<add> # To do that, we need an auxiliary axis to repeat elements along
<add> # it and then merge them along the desired axis.
<add>
<add> # Repeating
<add> auxiliary_axis = axis + 1
<add> x_shape = tf.shape(x)
<add> x_rep = tf.expand_dims(x, axis=auxiliary_axis)
<add> reps = np.ones(len(x.get_shape()) + 1)
<add> reps[auxiliary_axis] = rep
<add> x_rep = tf.tile(x_rep, reps)
<add>
<add> # Merging
<add> reps = np.delete(reps, auxiliary_axis)
<add> reps[axis] = rep
<add> reps = tf.constant(reps, dtype='int32')
<add> x_shape = x_shape * reps
<add> x_rep = tf.reshape(x_rep, x_shape)
<add>
<add> # Fix shape representation
<add> x_shape = x.get_shape().as_list()
<add> x_rep.set_shape(x_shape)
<add> x_rep._keras_shape = tuple(x_shape)
<add> return x_rep
<ide>
<ide>
<ide> def repeat(x, n):
<ide><path>tests/keras/backend/backend_test.py
<ide> def test_repeat_elements(self):
<ide> rep=reps, axis=rep_axis,
<ide> assert_value_with_ref=np_rep)
<ide>
<del> # test theano shape inference when
<del> # input shape has None entries
<del> if K.backend() == 'theano':
<add> if K.backend() != 'cntk':
<ide> shape = list(shape)
<ide> shape[rep_axis] = None
<ide> x = K.placeholder(shape=shape)
<ide> y = K.repeat_elements(x, reps, axis=rep_axis)
<ide> assert y._keras_shape == tuple(shape)
<del>
<del> # Test invalid use cases
<del> with pytest.raises(ValueError):
<del> ztf = KTF.placeholder(shape=(None, 2, 3))
<del> KTF.repeat_elements(ztf, 5, axis=0)
<add> if K.backend() == 'tensorflow':
<add> assert y._keras_shape == tuple(y.get_shape().as_list())
<ide>
<ide> def test_tile(self):
<ide> shape = (3, 4) | 2 |
Text | Text | add tao of redux links | 6ce5f0f5acb73fdd01ea4f363ed53ef0b79d2f37 | <ide><path>docs/tutorials/fundamentals/part-7-standard-patterns.md
<ide> To learn more about how to use Reselect and memoized selectors, see:
<ide>
<ide> As you've seen, there's several additional patterns that are widely used in Redux apps. These patterns do involve writing more code, but they provide benefits like making logic reusable, encapsulating implementation details, improving app performance, and making it easier to look up data.
<ide>
<add>:::info
<add>
<add>For more details on why these patterns exist and how Redux is meant to be used, see:
<add>
<add>- [Idiomatic Redux: The Tao of Redux, Part 1 - Implementation and Intent](https://blog.isquaredsoftware.com/2017/05/idiomatic-redux-tao-of-redux-part-1/)
<add>- [Idiomatic Redux: The Tao of Redux, Part 2 - Practice and Philosophy](https://blog.isquaredsoftware.com/2017/05/idiomatic-redux-tao-of-redux-part-2/)
<add>
<add>:::
<add>
<ide> Here's how our app looks after it's been fully converted to use these patterns:
<ide>
<ide> **FIXME Add CodeSandbox here** | 1 |
PHP | PHP | change wheredate method for postgres | 551cebeb143d43d23b57fe9c9dfaba4464243ac8 | <ide><path>src/Illuminate/Database/Query/Grammars/PostgresGrammar.php
<ide> protected function whereDate(Builder $query, $where)
<ide> {
<ide> $value = $this->parameter($where['value']);
<ide>
<del> return $this->wrap($where['column']).' '.$where['operator'].' '.$value.'::date';
<add> return $this->wrap($where['column']).'::date '.$where['operator'].' '.$value;
<ide> }
<ide>
<ide> /**
<ide><path>tests/Database/DatabaseQueryBuilderTest.php
<ide> public function testWhereDatePostgres()
<ide> {
<ide> $builder = $this->getPostgresBuilder();
<ide> $builder->select('*')->from('users')->whereDate('created_at', '=', '2015-12-21');
<del> $this->assertEquals('select * from "users" where "created_at" = ?::date', $builder->toSql());
<add> $this->assertEquals('select * from "users" where "created_at"::date = ?', $builder->toSql());
<ide> $this->assertEquals([0 => '2015-12-21'], $builder->getBindings());
<ide> }
<ide> | 2 |
Ruby | Ruby | remove syntax test | 6154182b13642de41125403d3be9959814d0afc3 | <ide><path>Library/Homebrew/cask/test/syntax_test.rb
<del>require "test_helper"
<del>
<del>describe "Syntax check" do
<del> project_root = Pathname.new(File.expand_path("#{File.dirname(__FILE__)}/../"))
<del> backend_files = Dir[project_root.join("**", "*.rb")].reject { |f| f.match %r{/vendor/|/Casks/} }
<del> interpreter = RUBY_PATH
<del> flags = %w[-c]
<del> flags.unshift "--disable-all"
<del> backend_files.each do |file|
<del> it "#{file} is valid Ruby" do
<del> args = flags + ["--", file]
<del> shutup do
<del> raise SyntaxError, "#{file} failed syntax check" unless system(interpreter, *args)
<del> end
<del> end
<del> end
<del>end | 1 |
Javascript | Javascript | use scope names rather than names | 2bf9e4b0c7b1a4a9ba45b6ce78a69a4f06023ac6 | <ide><path>spec/workspace-spec.js
<ide> i = /test/; #FIXME\
<ide> atom2.project.deserialize(atom.project.serialize())
<ide> atom2.workspace.deserialize(atom.workspace.serialize(), atom2.deserializers)
<ide>
<del> expect(atom2.grammars.getGrammars().map(grammar => grammar.name).sort()).toEqual([
<del> 'CoffeeScript',
<del> 'CoffeeScript (Literate)',
<del> 'JSDoc',
<del> 'JavaScript',
<del> 'Null Grammar',
<del> 'Regular Expression Replacement (JavaScript)',
<del> 'Regular Expressions (JavaScript)',
<del> 'TODO'
<add> expect(atom2.grammars.getGrammars().map(grammar => grammar.scopeName).sort()).toEqual([
<add> 'source.coffee',
<add> 'source.js',
<add> 'source.js.regexp',
<add> 'source.js.regexp.replacement',
<add> 'source.jsdoc',
<add> 'source.litcoffee',
<add> 'text.plain.null-grammar',
<add> 'text.todo'
<ide> ])
<ide>
<ide> atom2.destroy() | 1 |
PHP | PHP | add test for html->meta() | a448f49c89a6c430c5a99dd44a252e65eaa01b20 | <ide><path>tests/TestCase/View/Helper/HtmlHelperTest.php
<ide> public function testMeta()
<ide> 'meta' => ['property' => 'og:site_name', 'content' => 'CakePHP']
<ide> ];
<ide> $this->assertHtml($expected, $result);
<add>
<add> $result = $this->Html->meta(['link' => 'http://example.com/manifest', 'rel' => 'manifest']);
<add> $expected = [
<add> 'link' => ['href' => 'http://example.com/manifest', 'rel' => 'manifest']
<add> ];
<add> $this->assertHtml($expected, $result);
<ide> }
<ide>
<ide> /** | 1 |
Python | Python | add dutch example sentences (see ) | f2ea6d4713fe32048e26c3992202cbd431bb3203 | <ide><path>spacy/lang/nl/examples.py
<add># coding: utf8
<add>from __future__ import unicode_literals
<add>
<add>
<add>"""
<add>Example sentences to test spaCy and its language models.
<add>
<add>>>> from spacy.lang.nl.examples import sentences
<add>>>> docs = nlp.pipe(sentences)
<add>"""
<add>
<add>
<add>examples = [
<add> "Apple overweegt om voor 1 miljard een U.K. startup te kopen",
<add> "Autonome auto's verschuiven de verzekeringverantwoordelijkheid naar producenten",
<add> "San Francisco overweegt robots op voetpaden te verbieden",
<add> "Londen is een grote stad in het Verenigd Koninkrijk"
<add>] | 1 |
Text | Text | add a missing comma | d4b3fcba71095cf7b758f57c5e61722db2d082d5 | <ide><path>doc/api/n-api.md
<ide> that has a loop which iterates through the elements in a large array:
<ide> ```C
<ide> for (int i = 0; i < 1000000; i++) {
<ide> napi_value result;
<del> napi_status status = napi_get_element(e object, i, &result);
<add> napi_status status = napi_get_element(e, object, i, &result);
<ide> if (status != napi_ok) {
<ide> break;
<ide> }
<ide> for (int i = 0; i < 1000000; i++) {
<ide> break;
<ide> }
<ide> napi_value result;
<del> status = napi_get_element(e object, i, &result);
<add> status = napi_get_element(e, object, i, &result);
<ide> if (status != napi_ok) {
<ide> break;
<ide> } | 1 |
Java | Java | log can[se]deserialize error in jackson codecs | 0f6038af70434a20fc4112a79c00c411ab88e519 | <ide><path>spring-web/src/main/java/org/springframework/http/codec/json/AbstractJackson2Decoder.java
<ide> import java.math.BigDecimal;
<ide> import java.util.List;
<ide> import java.util.Map;
<add>import java.util.concurrent.atomic.AtomicReference;
<ide>
<ide> import com.fasterxml.jackson.core.JsonProcessingException;
<ide> import com.fasterxml.jackson.databind.DeserializationFeature;
<ide> public int getMaxInMemorySize() {
<ide> public boolean canDecode(ResolvableType elementType, @Nullable MimeType mimeType) {
<ide> JavaType javaType = getObjectMapper().constructType(elementType.getType());
<ide> // Skip String: CharSequenceDecoder + "*/*" comes after
<del> return (!CharSequence.class.isAssignableFrom(elementType.toClass()) &&
<del> getObjectMapper().canDeserialize(javaType) && supportsMimeType(mimeType));
<add> if (CharSequence.class.isAssignableFrom(elementType.toClass()) || !supportsMimeType(mimeType)) {
<add> return false;
<add> }
<add> if (!logger.isDebugEnabled()) {
<add> return getObjectMapper().canDeserialize(javaType);
<add> }
<add> else {
<add> AtomicReference<Throwable> causeRef = new AtomicReference<>();
<add> if (getObjectMapper().canDeserialize(javaType, causeRef)) {
<add> return true;
<add> }
<add> logWarningIfNecessary(javaType, causeRef.get());
<add> return false;
<add> }
<ide> }
<ide>
<ide> @Override
<ide><path>spring-web/src/main/java/org/springframework/http/codec/json/AbstractJackson2Encoder.java
<ide> import java.util.Collections;
<ide> import java.util.List;
<ide> import java.util.Map;
<add>import java.util.concurrent.atomic.AtomicReference;
<ide>
<ide> import com.fasterxml.jackson.core.JsonEncoding;
<ide> import com.fasterxml.jackson.core.JsonGenerator;
<ide> public boolean canEncode(ResolvableType elementType, @Nullable MimeType mimeType
<ide> return false;
<ide> }
<ide> }
<del> return (Object.class == clazz ||
<del> (!String.class.isAssignableFrom(elementType.resolve(clazz)) && getObjectMapper().canSerialize(clazz)));
<add> if (String.class.isAssignableFrom(elementType.resolve(clazz))) {
<add> return false;
<add> }
<add> if (Object.class == clazz) {
<add> return true;
<add> }
<add> if (!logger.isDebugEnabled()) {
<add> return getObjectMapper().canSerialize(clazz);
<add> }
<add> else {
<add> AtomicReference<Throwable> causeRef = new AtomicReference<>();
<add> if (getObjectMapper().canSerialize(clazz, causeRef)) {
<add> return true;
<add> }
<add> logWarningIfNecessary(clazz, causeRef.get());
<add> return false;
<add> }
<ide> }
<ide>
<ide> @Override
<ide><path>spring-web/src/main/java/org/springframework/http/codec/json/Jackson2CodecSupport.java
<ide>
<ide> import com.fasterxml.jackson.annotation.JsonView;
<ide> import com.fasterxml.jackson.databind.JavaType;
<add>import com.fasterxml.jackson.databind.JsonMappingException;
<ide> import com.fasterxml.jackson.databind.ObjectMapper;
<ide> import org.apache.commons.logging.Log;
<ide>
<ide> protected List<MimeType> getMimeTypes() {
<ide>
<ide>
<ide> protected boolean supportsMimeType(@Nullable MimeType mimeType) {
<del> return (mimeType == null || this.mimeTypes.stream().anyMatch(m -> m.isCompatibleWith(mimeType)));
<add> if (mimeType == null) {
<add> return true;
<add> }
<add> for (MimeType supportedMimeType : this.mimeTypes) {
<add> if (supportedMimeType.isCompatibleWith(mimeType)) {
<add> return true;
<add> }
<add> }
<add> return false;
<add> }
<add>
<add> /**
<add> * Determine whether to log the given exception coming from a
<add> * {@link ObjectMapper#canDeserialize} / {@link ObjectMapper#canSerialize} check.
<add> * @param type the class that Jackson tested for (de-)serializability
<add> * @param cause the Jackson-thrown exception to evaluate
<add> * (typically a {@link JsonMappingException})
<add> * @since 5.3.1
<add> */
<add> protected void logWarningIfNecessary(Type type, @Nullable Throwable cause) {
<add> if (cause == null) {
<add> return;
<add> }
<add> if (logger.isDebugEnabled()) {
<add> String msg = "Failed to evaluate Jackson " + (type instanceof JavaType ? "de" : "") +
<add> "serialization for type [" + type + "]";
<add> logger.debug(msg, cause);
<add> }
<ide> }
<ide>
<ide> protected JavaType getJavaType(Type type, @Nullable Class<?> contextClass) { | 3 |
Java | Java | add missing @since tag | 93919204a166b306aef5dfb50ccc1b02decbc5ae | <ide><path>spring-jdbc/src/main/java/org/springframework/jdbc/core/simple/SimpleJdbcCallOperations.java
<ide> public interface SimpleJdbcCallOperations {
<ide> /**
<ide> * Indicates that parameters should be bound by name.
<ide> * @return the instance of this SimpleJdbcCall
<add> * @since 4.2
<ide> */
<ide> SimpleJdbcCallOperations withNamedBinding();
<ide> | 1 |
Text | Text | use tables for available plugins | 79351caec1b413c9da1080fceff49eb994323734 | <ide><path>docs/extend/plugins.md
<ide> Follow the instructions in the plugin's documentation.
<ide>
<ide> ## Finding a plugin
<ide>
<del>The following plugins exist:
<del>
<del>* The [Blockbridge plugin](https://github.com/blockbridge/blockbridge-docker-volume)
<del> is a volume plugin that provides access to an extensible set of
<del> container-based persistent storage options. It supports single and multi-host Docker
<del> environments with features that include tenant isolation, automated
<del> provisioning, encryption, secure deletion, snapshots and QoS.
<del>
<del>* The [Convoy plugin](https://github.com/rancher/convoy) is a volume plugin for a
<del> variety of storage back-ends including device mapper and NFS. It's a simple standalone
<del> executable written in Go and provides the framework to support vendor-specific extensions
<del> such as snapshots, backups and restore.
<del>
<del>* The [Flocker plugin](https://clusterhq.com/docker-plugin/) is a volume plugin
<del> which provides multi-host portable volumes for Docker, enabling you to run
<del> databases and other stateful containers and move them around across a cluster
<del> of machines.
<del>
<del>* The [GlusterFS plugin](https://github.com/calavera/docker-volume-glusterfs) is
<del> another volume plugin that provides multi-host volumes management for Docker
<del> using GlusterFS.
<del>
<del>* The [Horcrux Volume Plugin](https://github.com/muthu-r/horcrux) allows on-demand,
<del> version controlled access to your data. Horcrux is an open-source plugin,
<del> written in Go, and supports SCP, [Minio](https://www.minio.io) and Amazon S3.
<del>
<del>* The [IPFS Volume Plugin](http://github.com/vdemeester/docker-volume-ipfs)
<del> is an open source volume plugin that allows using an
<del> [ipfs](https://ipfs.io/) filesystem as a volume.
<del>
<del>* The [Keywhiz plugin](https://github.com/calavera/docker-volume-keywhiz) is
<del> a plugin that provides credentials and secret management using Keywhiz as
<del> a central repository.
<del>
<del>* The [Netshare plugin](https://github.com/gondor/docker-volume-netshare) is a volume plugin
<del> that provides volume management for NFS 3/4, AWS EFS and CIFS file systems.
<del>
<del>* The [gce-docker plugin](https://github.com/mcuadros/gce-docker) is a volume plugin able to attach, format and mount Google Compute [persistent-disks](https://cloud.google.com/compute/docs/disks/persistent-disks).
<del>
<del>* The [OpenStorage Plugin](https://github.com/libopenstorage/openstorage) is a cluster aware volume plugin that provides volume management for file and block storage solutions. It implements a vendor neutral specification for implementing extensions such as CoS, encryption, and snapshots. It has example drivers based on FUSE, NFS, NBD and EBS to name a few.
<del>
<del>* The [Quobyte Volume Plugin](https://github.com/quobyte/docker-volume) connects Docker to [Quobyte](http://www.quobyte.com/containers)'s data center file system, a general-purpose scalable and fault-tolerant storage platform.
<del>
<del>* The [REX-Ray plugin](https://github.com/emccode/rexray) is a volume plugin
<del> which is written in Go and provides advanced storage functionality for many
<del> platforms including VirtualBox, EC2, Google Compute Engine, OpenStack, and EMC.
<del>
<del>* The [Contiv Volume Plugin](https://github.com/contiv/volplugin) is an open
<del> source volume plugin that provides multi-tenant, persistent, distributed storage
<del> with intent based consumption using ceph underneath.
<del>
<del>* The [Contiv Networking](https://github.com/contiv/netplugin) is an open source
<del> libnetwork plugin to provide infrastructure and security policies for a
<del> multi-tenant micro services deployment, while providing an integration to
<del> physical network for non-container workload. Contiv Networking implements the
<del> remote driver and IPAM APIs available in Docker 1.9 onwards.
<del>
<del>* The [Weave Network Plugin](http://docs.weave.works/weave/latest_release/plugin.html)
<del> creates a virtual network that connects your Docker containers -
<del> across multiple hosts or clouds and enables automatic discovery of
<del> applications. Weave networks are resilient, partition tolerant,
<del> secure and work in partially connected networks, and other adverse
<del> environments - all configured with delightful simplicity.
<del>
<del>* The [Kuryr Network Plugin](https://github.com/openstack/kuryr) is
<del> developed as part of the OpenStack Kuryr project and implements the
<del> Docker networking (libnetwork) remote driver API by utilizing
<del> Neutron, the OpenStack networking service. It includes an IPAM
<del> driver as well.
<del>
<del>* The [Local Persist Plugin](https://github.com/CWSpear/local-persist)
<del> extends the default `local` driver's functionality by allowing you specify
<del> a mountpoint anywhere on the host, which enables the files to *always persist*,
<del> even if the volume is removed via `docker volume rm`.
<del>
<del>* The [NetApp Plugin](https://github.com/NetApp/netappdvp) (nDVP) provides
<del> direct integration with the Docker ecosystem for the NetApp storage portfolio.
<del> The nDVP package supports the provisioning and management of
<del> storage resources from the storage platform to Docker hosts, with a robust
<del> framework for adding additional platforms in the future.
<add>The sections below provide an inexhaustive overview of available plugins.
<add>
<add><style>
<add>#content tr td:first-child { white-space: nowrap;}
<add></style>
<add>
<add>### Network plugins
<add>
<add>Plugin | Description
<add>----------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
<add>[Contiv Networking](https://github.com/contiv/netplugin) | An open source network plugin to provide infrastructure and security policies for a multi-tenant micro services deployment, while providing an integration to physical network for non-container workload. Contiv Networking implements the remote driver and IPAM APIs available in Docker 1.9 onwards.
<add>[Kuryr Network Plugin](https://github.com/openstack/kuryr) | A network plugin is developed as part of the OpenStack Kuryr project and implements the Docker networking (libnetwork) remote driver API by utilizing Neutron, the OpenStack networking service. It includes an IPAM driver as well.
<add>[Weave Network Plugin](http://docs.weave.works/weave/latest_release/plugin.html) | A network plugin that creates a virtual network that connects your Docker containers - across multiple hosts or clouds and enables automatic discovery of applications. Weave networks are resilient, partition tolerant, secure and work in partially connected networks, and other adverse environments - all configured with delightful simplicity.
<add>
<add>### Volume plugins
<add>
<add>Plugin | Description
<add>----------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
<add>[Blockbridge plugin](https://github.com/blockbridge/blockbridge-docker-volume) | A volume plugin that provides access to an extensible set of container-based persistent storage options. It supports single and multi-host Docker environments with features that include tenant isolation, automated provisioning, encryption, secure deletion, snapshots and QoS.
<add>[Contiv Volume Plugin](https://github.com/contiv/volplugin) | An open source volume plugin that provides multi-tenant, persistent, distributed storage with intent based consumption using ceph underneath.
<add>[Convoy plugin](https://github.com/rancher/convoy) | A volume plugin for a variety of storage back-ends including device mapper and NFS. It's a simple standalone executable written in Go and provides the framework to support vendor-specific extensions such as snapshots, backups and restore.
<add>[Flocker plugin](https://clusterhq.com/docker-plugin/) | A volume plugin that provides multi-host portable volumes for Docker, enabling you to run databases and other stateful containers and move them around across a cluster of machines.
<add>[gce-docker plugin](https://github.com/mcuadros/gce-docker) | A volume plugin able to attach, format and mount Google Compute [persistent-disks](https://cloud.google.com/compute/docs/disks/persistent-disks).
<add>[GlusterFS plugin](https://github.com/calavera/docker-volume-glusterfs) | A volume plugin that provides multi-host volumes management for Docker using GlusterFS.
<add>[Horcrux Volume Plugin](https://github.com/muthu-r/horcrux) | A volume plugin that allows on-demand, version controlled access to your data. Horcrux is an open-source plugin, written in Go, and supports SCP, [Minio](https://www.minio.io) and Amazon S3.
<add>[IPFS Volume Plugin](http://github.com/vdemeester/docker-volume-ipfs) | An open source volume plugin that allows using an [ipfs](https://ipfs.io/) filesystem as a volume.
<add>[Keywhiz plugin](https://github.com/calavera/docker-volume-keywhiz) | A plugin that provides credentials and secret management using Keywhiz as a central repository.
<add>[Local Persist Plugin](https://github.com/CWSpear/local-persist) | A volume plugin that extends the default `local` driver's functionality by allowing you specify a mountpoint anywhere on the host, which enables the files to *always persist*, even if the volume is removed via `docker volume rm`.
<add>[NetApp Plugin](https://github.com/NetApp/netappdvp) (nDVP) | A volume plugin that provides direct integration with the Docker ecosystem for the NetApp storage portfolio. The nDVP package supports the provisioning and management of storage resources from the storage platform to Docker hosts, with a robust framework for adding additional platforms in the future.
<add>[Netshare plugin](https://github.com/gondor/docker-volume-netshare) | A volume plugin that provides volume management for NFS 3/4, AWS EFS and CIFS file systems.
<add>[OpenStorage Plugin](https://github.com/libopenstorage/openstorage) | A cluster-aware volume plugin that provides volume management for file and block storage solutions. It implements a vendor neutral specification for implementing extensions such as CoS, encryption, and snapshots. It has example drivers based on FUSE, NFS, NBD and EBS to name a few.
<add>[Quobyte Volume Plugin](https://github.com/quobyte/docker-volume) | A volume plugin that connects Docker to [Quobyte](http://www.quobyte.com/containers)'s data center file system, a general-purpose scalable and fault-tolerant storage platform.
<add>[REX-Ray plugin](https://github.com/emccode/rexray) | A volume plugin which is written in Go and provides advanced storage functionality for many platforms including VirtualBox, EC2, Google Compute Engine, OpenStack, and EMC.
<add>
<ide>
<ide> ## Troubleshooting a plugin
<ide> | 1 |
Javascript | Javascript | add viewconfig for multilinetextinput | 10f8c1a1c0dd3301daab32201462a41be1086345 | <ide><path>Libraries/Components/TextInput/RCTMultilineTextInputNativeComponent.js
<ide> import type {HostComponent} from '../../Renderer/shims/ReactNativeTypes';
<ide> import requireNativeComponent from '../../ReactNative/requireNativeComponent';
<ide> import codegenNativeCommands from '../../Utilities/codegenNativeCommands';
<ide> import type {TextInputNativeCommands} from './TextInputNativeCommands';
<add>import RCTTextInputViewConfig from './RCTTextInputViewConfig';
<add>const ReactNativeViewConfigRegistry = require('../../Renderer/shims/ReactNativeViewConfigRegistry');
<ide>
<ide> type NativeType = HostComponent<mixed>;
<ide>
<ide> export const Commands: NativeCommands = codegenNativeCommands<NativeCommands>({
<ide> supportedCommands: ['focus', 'blur', 'setTextAndSelection'],
<ide> });
<ide>
<del>const SinglelineTextInputNativeComponent: HostComponent<mixed> = requireNativeComponent<mixed>(
<del> 'RCTMultilineTextInputView',
<del>);
<add>let MultilineTextInputNativeComponent;
<add>if (global.RN$Bridgeless) {
<add> ReactNativeViewConfigRegistry.register('RCTMultilineTextInputView', () => {
<add> return RCTTextInputViewConfig;
<add> });
<add> MultilineTextInputNativeComponent = 'RCTMultilineTextInputView';
<add>} else {
<add> MultilineTextInputNativeComponent = requireNativeComponent<mixed>(
<add> 'RCTMultilineTextInputView',
<add> );
<add>}
<ide>
<del>export default SinglelineTextInputNativeComponent;
<add>// flowlint-next-line unclear-type:off
<add>export default ((MultilineTextInputNativeComponent: any): HostComponent<mixed>);
<ide><path>Libraries/Components/TextInput/RCTSingelineTextInputNativeComponent.js
<ide> import type {HostComponent} from '../../Renderer/shims/ReactNativeTypes';
<ide> import requireNativeComponent from '../../ReactNative/requireNativeComponent';
<ide> import codegenNativeCommands from '../../Utilities/codegenNativeCommands';
<ide> import type {TextInputNativeCommands} from './TextInputNativeCommands';
<del>import RCTSinglelineTextInputViewConfig from './RCTSinglelineTextInputViewConfig';
<add>import RCTTextInputViewConfig from './RCTTextInputViewConfig';
<ide> const ReactNativeViewConfigRegistry = require('../../Renderer/shims/ReactNativeViewConfigRegistry');
<ide>
<ide> type NativeType = HostComponent<mixed>;
<ide> export const Commands: NativeCommands = codegenNativeCommands<NativeCommands>({
<ide> let SinglelineTextInputNativeComponent;
<ide> if (global.RN$Bridgeless) {
<ide> ReactNativeViewConfigRegistry.register('RCTSinglelineTextInputView', () => {
<del> return RCTSinglelineTextInputViewConfig;
<add> return RCTTextInputViewConfig;
<ide> });
<ide> SinglelineTextInputNativeComponent = 'RCTSinglelineTextInputView';
<ide> } else {
<add><path>Libraries/Components/TextInput/RCTTextInputViewConfig.js
<del><path>Libraries/Components/TextInput/RCTSinglelineTextInputViewConfig.js
<ide> import ReactNativeViewViewConfig from '../../Components/View/ReactNativeViewViewConfig';
<ide> import {type ViewConfig} from '../../Renderer/shims/ReactNativeTypes';
<ide>
<del>const RCTSinglelineTextInputViewConfig = {
<add>const RCTTextInputViewConfig = {
<ide> uiViewClassName: 'RCTSinglelineTextInputView',
<ide> bubblingEventTypes: {
<ide> topBlur: {
<ide> const RCTSinglelineTextInputViewConfig = {
<ide> },
<ide> };
<ide>
<del>module.exports = (RCTSinglelineTextInputViewConfig: ViewConfig);
<add>module.exports = (RCTTextInputViewConfig: ViewConfig); | 3 |
Javascript | Javascript | fix params for non-get actions | 212a6ff29a2e047413a85dfe2a4ea4b71e0f0d68 | <ide><path>src/service/resource.js
<ide> * parameters:
<ide> *
<ide> * - HTTP GET "class" actions: `Resource.action([parameters], [success], [error])`
<del> * - non-GET "class" actions: `Resource.action(postData, [parameters], [success], [error])`
<add> * - non-GET "class" actions: `Resource.action([parameters], postData, [success], [error])`
<ide> * - non-GET instance actions: `instance.$action([parameters], [success], [error])`
<ide> *
<ide> * | 1 |
PHP | PHP | fix coding standards | 771cfde19631b7cb147b9c9af06b064f0b15d635 | <ide><path>lib/Cake/Test/Case/View/Helper/FormHelperTest.php
<ide> public function testPostLink() {
<ide> '/a'
<ide> ));
<ide>
<del> $result = $this->Form->postLink('Delete', '/posts/delete/1', array('method'=>'delete'));
<add> $result = $this->Form->postLink('Delete', '/posts/delete/1', array('method' => 'delete'));
<ide> $this->assertTags($result, array(
<ide> 'form' => array(
<ide> 'method' => 'post', 'action' => '/posts/delete/1',
<ide><path>lib/Cake/Test/Case/View/Helper/RssHelperTest.php
<ide> public function testItemEnclosureLength() {
<ide> } else {
<ide> $type = mime_content_type($tmpFile);
<ide> }
<del>
<add>
<ide> $expected = array(
<ide> '<item',
<ide> '<title',
<ide><path>lib/Cake/View/Helper/FormHelper.php
<ide> public function postButton($title, $url, $options = array()) {
<ide> * @link http://book.cakephp.org/2.0/en/core-libraries/helpers/form.html#FormHelper::postLink
<ide> */
<ide> public function postLink($title, $url = null, $options = array(), $confirmMessage = false) {
<del> $requestMethod = 'POST';
<add> $requestMethod = 'POST';
<ide> if (!empty($options['method'])) {
<ide> $requestMethod = strtoupper($options['method']);
<ide> unset($options['method']);
<ide> public function postLink($title, $url = null, $options = array(), $confirmMessag
<ide>
<ide> $formName = uniqid('post_');
<ide> $formUrl = $this->url($url);
<del> $out = $this->Html->useTag('form', $formUrl, array('name' => $formName, 'id' => $formName, 'style' => 'display:none;', 'method' => 'post'));
<del> $out .= $this->Html->useTag('hidden', '_method', ' value="' . $requestMethod . '"');
<add> $out = $this->Html->useTag('form', $formUrl, array(
<add> 'name' => $formName,
<add> 'id' => $formName,
<add> 'style' => 'display:none;',
<add> 'method' => 'post'
<add> ));
<add> $out .= $this->Html->useTag('hidden', '_method', array(
<add> 'value' => $requestMethod
<add> ));
<ide> $out .= $this->_csrfField();
<ide>
<ide> $fields = array(); | 3 |
Text | Text | fix bad logic in post error handling example | 2e4de22c8ce9b4e087ac2753ead4688fe0c05a35 | <ide><path>docs/tutorials/essentials/part-5-async-logic.md
<ide> const postsSlice = createSlice({
<ide> },
<ide> [fetchPosts.rejected]: (state, action) => {
<ide> state.status = 'failed'
<del> state.error = action.payload
<add> state.error = action.error.message
<ide> }
<ide> }
<ide> // highlight-end
<ide> export const PostsList = () => {
<ide> content = orderedPosts.map(post => (
<ide> <PostExcerpt key={post.id} post={post} />
<ide> ))
<del> } else if (postStatus === 'error') {
<add> } else if (postStatus === 'failed') {
<ide> content = <div>{error}</div>
<ide> }
<ide> // highlight-end | 1 |
PHP | PHP | remove empty space to match code standards | 2dff1a1f250b3d232a84b270a20e07277e1ecb28 | <ide><path>tests/TestCase/Routing/RouterTest.php
<ide> public function testBaseUrlWithBasePath()
<ide> }
<ide>
<ide> /**
<del> * Test that Router used the correct url including base path for requesting current actions.
<add> * Test that Router uses the correct url including base path for requesting the current actions.
<ide> *
<ide> * @return void
<ide> */
<ide> public function testCurrentUrlWithBasePath()
<ide> Router::setRequestInfo($request);
<ide> $this->assertEquals('http://example.com/cakephp/pages/view/1', Router::url(null, true));
<ide> $this->assertEquals('/cakephp/pages/view/1', Router::url());
<del>
<ide> }
<ide>
<ide> /** | 1 |
Python | Python | improve string encodeing/deconding | 47b2983f28357dee8bd29bda7909386dc8d44950 | <ide><path>glances/compat.py
<ide> # pylint: skip-file
<ide> """Python 2/3 compatibility shims."""
<ide>
<del>from __future__ import print_function
<add>from __future__ import print_function, unicode_literals
<add>
<ide> import operator
<ide> import sys
<ide> import unicodedata
<ide> def iterkeys(d):
<ide> def itervalues(d):
<ide> return iter(d.values())
<ide>
<del> def u(s):
<add> def u(s, errors='replace'):
<ide> if isinstance(s, text_type):
<ide> return s
<del> return s.decode('utf-8', 'replace')
<add> return s.decode('utf-8', errors=errors)
<ide>
<del> def b(s):
<add> def b(s, errors='replace'):
<ide> if isinstance(s, binary_type):
<ide> return s
<del> return s.encode('utf-8')
<add> return s.encode('utf-8', errors=errors)
<add>
<add> def n(s):
<add> '''Only in Python 2...
<add> from future.utils import bytes_to_native_str as n
<add> '''
<add> return s
<ide>
<del> def nativestr(s):
<add> def nativestr(s, errors='replace'):
<ide> if isinstance(s, text_type):
<ide> return s
<ide> elif isinstance(s, (int, float)):
<ide> return s.__str__()
<ide> else:
<del> return s.decode('utf-8', 'replace')
<add> return s.decode('utf-8', errors=errors)
<ide>
<ide> def system_exec(command):
<ide> """Execute a system command and return the resul as a str"""
<ide> def system_exec(command):
<ide> return res.rstrip()
<ide>
<ide> else:
<add> from future.utils import bytes_to_native_str as n
<ide> import Queue as queue
<ide> from itertools import imap as map
<ide> from ConfigParser import SafeConfigParser as ConfigParser, NoOptionError, NoSectionError
<ide> def iterkeys(d):
<ide> def itervalues(d):
<ide> return d.itervalues()
<ide>
<del> def u(s):
<add> def u(s, errors='replace'):
<ide> if isinstance(s, text_type):
<ide> return s
<del> return s.decode('utf-8')
<add> return s.decode('utf-8', errors=errors)
<ide>
<del> def b(s):
<add> def b(s, errors='replace'):
<ide> if isinstance(s, binary_type):
<ide> return s
<del> return s.encode('utf-8', 'replace')
<add> return s.encode('utf-8', errors=errors)
<ide>
<del> def nativestr(s):
<add> def nativestr(s, errors='replace'):
<ide> if isinstance(s, binary_type):
<ide> return s
<ide> elif isinstance(s, (int, float)):
<ide> return s.__str__()
<ide> else:
<del> return s.encode('utf-8', 'replace')
<add> return s.encode('utf-8', errors=errors)
<ide>
<ide> def system_exec(command):
<ide> """Execute a system command and return the resul as a str"""
<ide><path>glances/folder_list.py
<ide> # along with this program. If not, see <http://www.gnu.org/licenses/>.
<ide>
<ide> """Manage the folder list."""
<add>from __future__ import unicode_literals
<ide>
<ide> import os
<ide>
<ide><path>glances/outputs/glances_curses.py
<ide> # along with this program. If not, see <http://www.gnu.org/licenses/>.
<ide>
<ide> """Curses interface class."""
<add>from __future__ import unicode_literals
<ide>
<ide> import re
<ide> import sys
<ide><path>glances/plugins/glances_diskio.py
<ide> # along with this program. If not, see <http://www.gnu.org/licenses/>.
<ide>
<ide> """Disk I/O plugin."""
<add>from __future__ import unicode_literals
<ide>
<del>from glances.compat import nativestr
<add>from glances.compat import nativestr, n
<ide> from glances.timer import getTimeSinceLastUpdate
<ide> from glances.plugins.glances_plugin import GlancesPlugin
<ide>
<ide> def update(self):
<ide> self.diskio_old[disk].write_bytes)
<ide> diskstat = {
<ide> 'time_since_update': time_since_update,
<del> 'disk_name': disk,
<add> 'disk_name': n(disk),
<ide> 'read_count': read_count,
<ide> 'write_count': write_count,
<ide> 'read_bytes': read_bytes,
<ide><path>glances/plugins/glances_folders.py
<ide> # along with this program. If not, see <http://www.gnu.org/licenses/>.
<ide>
<ide> """Folder plugin."""
<add>from __future__ import unicode_literals
<ide>
<ide> import numbers
<ide>
<del>from glances.compat import nativestr
<add>from glances.compat import nativestr, n
<ide> from glances.folder_list import FolderList as glancesFolderList
<ide> from glances.plugins.glances_plugin import GlancesPlugin
<ide> from glances.logger import logger
<ide><path>glances/plugins/glances_fs.py
<ide> # along with this program. If not, see <http://www.gnu.org/licenses/>.
<ide>
<ide> """File system plugin."""
<add>from __future__ import unicode_literals
<ide>
<ide> import operator
<ide>
<del>from glances.compat import u, nativestr
<add>from glances.compat import u, nativestr, n
<ide> from glances.plugins.glances_plugin import GlancesPlugin
<ide>
<ide> import psutil
<ide><path>glances/plugins/glances_network.py
<ide> # along with this program. If not, see <http://www.gnu.org/licenses/>.
<ide>
<ide> """Network plugin."""
<add>from __future__ import unicode_literals
<ide>
<ide> import base64
<ide> import operator
<ide>
<ide> from glances.timer import getTimeSinceLastUpdate
<ide> from glances.plugins.glances_plugin import GlancesPlugin
<add>from glances.compat import n, u, b, nativestr
<ide>
<ide> import psutil
<ide>
<ide> def update(self):
<ide> rx = cumulative_rx - self.network_old[net].bytes_recv
<ide> tx = cumulative_tx - self.network_old[net].bytes_sent
<ide> cx = rx + tx
<del> netstat = {'interface_name': net,
<add> netstat = {'interface_name': n(net),
<ide> 'time_since_update': time_since_update,
<ide> 'cumulative_rx': cumulative_rx,
<ide> 'rx': rx, | 7 |
Javascript | Javascript | rntester transformexample crash | ff4a3c2bc76b3e386c3d727ed6edbf767d13e3b0 | <ide><path>packages/rn-tester/js/examples/Transform/TransformExample.js
<ide> const styles = StyleSheet.create({
<ide> width: 50,
<ide> },
<ide> box7Transform: {
<del> transform: 'translate(-50, 35) rotate(50deg) scale(2)',
<add> transform: 'translate(-50px, 35px) rotate(50deg) scale(2)',
<ide> },
<ide> flipCardContainer: {
<ide> marginVertical: 40,
<ide> exports.examples = [
<ide> },
<ide> {
<ide> title: 'Transform using a string',
<del> description: "transform: 'translate(-50, 35) rotate(50deg) scale(2)'",
<add> description: "transform: 'translate(-50px, 35px) rotate(50deg) scale(2)'",
<ide> render(): Node {
<ide> return (
<ide> <View style={styles.container}> | 1 |
Javascript | Javascript | add types to rctsnapshotnativecomponent | 67ad72fa3c9beb5c2b1e32530b0b2c0b5e74883c | <ide><path>Libraries/RCTTest/RCTSnapshotNativeComponent.js
<ide> * LICENSE file in the root directory of this source tree.
<ide> *
<ide> * @format
<del> * @flow strict-local
<add> * @flow
<ide> */
<ide>
<ide> 'use strict';
<ide>
<add>import type {SyntheticEvent} from 'CoreEventTypes';
<add>import type {ViewProps} from 'ViewPropTypes';
<add>import type {NativeComponent} from 'ReactNative';
<add>
<add>type SnapshotReadyEvent = SyntheticEvent<
<add> $ReadOnly<{
<add> testIdentifier: string,
<add> }>,
<add>>;
<add>
<add>type NativeProps = $ReadOnly<{|
<add> ...ViewProps,
<add> onSnapshotReady?: ?(event: SnapshotReadyEvent) => mixed,
<add> testIdentifier?: ?string,
<add>|}>;
<add>
<add>type SnapshotViewNativeType = Class<NativeComponent<NativeProps>>;
<add>
<ide> const requireNativeComponent = require('requireNativeComponent');
<ide>
<del>module.exports = requireNativeComponent('RCTSnapshot');
<add>module.exports = ((requireNativeComponent('RCTSnapshot'):any): SnapshotViewNativeType); | 1 |
Python | Python | move the model type check | 216b2f9e8061599982b635f3c6b0240f79d21e95 | <ide><path>src/transformers/pipelines/document_question_answering.py
<ide> class DocumentQuestionAnsweringPipeline(Pipeline):
<ide>
<ide> def __init__(self, *args, **kwargs):
<ide> super().__init__(*args, **kwargs)
<del> self.check_model_type(MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING)
<ide>
<ide> if self.model.config.__class__.__name__ == "VisionEncoderDecoderConfig":
<ide> self.model_type = ModelType.VisionEncoderDecoder
<ide> if self.model.config.encoder.model_type != "donut-swin":
<ide> raise ValueError("Currently, the only supported VisionEncoderDecoder model is Donut")
<del> elif self.model.config.__class__.__name__ == "LayoutLMConfig":
<del> self.model_type = ModelType.LayoutLM
<ide> else:
<del> self.model_type = ModelType.LayoutLMv2andv3
<add> self.check_model_type(MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING)
<add> if self.model.config.__class__.__name__ == "LayoutLMConfig":
<add> self.model_type = ModelType.LayoutLM
<add> else:
<add> self.model_type = ModelType.LayoutLMv2andv3
<ide>
<ide> def _sanitize_parameters(
<ide> self, | 1 |
Text | Text | update docs with react v15.5.0 | 49a30af4cbeee90b058496d6aaa65b5e926a47e7 | <ide><path>docs/advanced/ExampleRedditAPI.md
<ide> export default class Root extends Component {
<ide> #### `containers/AsyncApp.js`
<ide>
<ide> ```js
<del>import React, { Component, PropTypes } from 'react'
<add>import React, { Component } from 'react'
<add>import PropTypes from 'prop-types'
<ide> import { connect } from 'react-redux'
<ide> import { selectSubreddit, fetchPostsIfNeeded, invalidateSubreddit } from '../actions'
<ide> import Picker from '../components/Picker'
<ide> export default connect(mapStateToProps)(AsyncApp)
<ide> #### `components/Picker.js`
<ide>
<ide> ```js
<del>import React, { Component, PropTypes } from 'react'
<add>import React, { Component } from 'react'
<add>import PropTypes from 'prop-types'
<ide>
<ide> export default class Picker extends Component {
<ide> render() {
<ide> Picker.propTypes = {
<ide> #### `components/Posts.js`
<ide>
<ide> ```js
<del>import React, { PropTypes, Component } from 'react'
<add>import React, { Component } from 'react'
<add>import PropTypes from 'prop-types'
<ide>
<ide> export default class Posts extends Component {
<ide> render() {
<ide><path>docs/advanced/UsageWithReactRouter.md
<ide> Unless you are targeting old browsers like IE9, you can always use `browserHisto
<ide>
<ide> #### `components/Root.js`
<ide> ``` js
<del>import React, { PropTypes } from 'react';
<add>import React from 'react';
<add>import PropTypes from 'prop-types';
<ide> import { Provider } from 'react-redux';
<ide> import { Router, Route, browserHistory } from 'react-router';
<ide> import App from './App';
<ide><path>docs/basics/ExampleTodoList.md
<ide> export default todoApp
<ide> #### `components/Todo.js`
<ide>
<ide> ```js
<del>import React, { PropTypes } from 'react'
<add>import React from 'react'
<add>import PropTypes from 'prop-types'
<ide>
<ide> const Todo = ({ onClick, completed, text }) => (
<ide> <li
<ide> export default Todo
<ide> #### `components/TodoList.js`
<ide>
<ide> ```js
<del>import React, { PropTypes } from 'react'
<add>import React from 'react'
<add>import PropTypes from 'prop-types'
<ide> import Todo from './Todo'
<ide>
<ide> const TodoList = ({ todos, onTodoClick }) => (
<ide> export default TodoList
<ide>
<ide> #### `components/Link.js`
<ide> ```js
<del>import React, { PropTypes } from 'react'
<add>import React from 'react'
<add>import PropTypes from 'prop-types'
<ide>
<ide> const Link = ({ active, children, onClick }) => {
<ide> if (active) {
<ide><path>docs/basics/UsageWithReact.md
<ide> These are all normal React components, so we won't examine them in detail. We wr
<ide> #### `components/Todo.js`
<ide>
<ide> ```js
<del>import React, { PropTypes } from 'react'
<add>import React from 'react'
<add>import PropTypes from 'prop-types'
<ide>
<ide> const Todo = ({ onClick, completed, text }) => (
<ide> <li
<ide> export default Todo
<ide> #### `components/TodoList.js`
<ide>
<ide> ```js
<del>import React, { PropTypes } from 'react'
<add>import React from 'react'
<add>import PropTypes from 'prop-types'
<ide> import Todo from './Todo'
<ide>
<ide> const TodoList = ({ todos, onTodoClick }) => (
<ide> export default TodoList
<ide> #### `components/Link.js`
<ide>
<ide> ```js
<del>import React, { PropTypes } from 'react'
<add>import React from 'react'
<add>import PropTypes from 'prop-types'
<ide>
<ide> const Link = ({ active, children, onClick }) => {
<ide> if (active) {
<ide><path>docs/recipes/WritingTests.md
<ide> To test the components we make a `setup()` helper that passes the stubbed callba
<ide> #### Example
<ide>
<ide> ```js
<del>import React, { PropTypes, Component } from 'react'
<add>import React, { Component } from 'react'
<add>import PropTypes from 'prop-types'
<ide> import TodoTextInput from './TodoTextInput'
<ide>
<ide> class Header extends Component { | 5 |
PHP | PHP | fix double get on cache remember | 111d4444c5ca331759d9dd2bc3a6561c9d57fbcd | <ide><path>src/Illuminate/Cache/Repository.php
<ide> public function rememberForever($key, Closure $callback)
<ide> // If the item exists in the cache we will just return this immediately
<ide> // otherwise we will execute the given Closure and cache the result
<ide> // of that execution for the given number of minutes. It's easy.
<del> if ($this->has($key)) return $this->get($key);
<add> if ( ! is_null($value = $this->get($key)))
<add> {
<add> return $value;
<add> }
<ide>
<ide> $this->forever($key, $value = $callback());
<ide> | 1 |
Text | Text | add bethany to tsc | dff22dd176d584f3c050a659fa514f079ab5f208 | <ide><path>README.md
<ide> For information about the governance of the Node.js project, see
<ide> **Anna Henningsen** <anna@addaleax.net> (she/her)
<ide> * [apapirovski](https://github.com/apapirovski) -
<ide> **Anatoli Papirovski** <apapirovski@mac.com> (he/him)
<add>* [BethGriggs](https://github.com/BethGriggs) -
<add>**Beth Griggs** <Bethany.Griggs@uk.ibm.com> (she/her)
<ide> * [ChALkeR](https://github.com/ChALkeR) -
<ide> **Сковорода Никита Андреевич** <chalkerx@gmail.com> (he/him)
<ide> * [cjihrig](https://github.com/cjihrig) - | 1 |
Java | Java | fix typos in reactcxxerrorhandler message | 0aed5d9db2569e9c6550f5f241f95d244398a97a | <ide><path>ReactAndroid/src/main/java/com/facebook/react/bridge/ReactCxxErrorHandler.java
<ide> private static void handleError(final String message) {
<ide> parameters[0] = new Exception(message);
<ide> mHandleErrorFunc.invoke(mObject, parameters);
<ide> } catch (Exception e) {
<del> FLog.e("ReactCxxErrorHandler", "Failed to invole error hanlder function", e);
<add> FLog.e("ReactCxxErrorHandler", "Failed to invoke error handler function", e);
<ide> }
<ide> }
<ide> } | 1 |
Python | Python | remove ciscoccs driver | bdccdb03e8b36ad4eefbca7601952bc7757e838a | <ide><path>libcloud/compute/drivers/ciscoccs.py
<del># Licensed to the Apache Software Foundation (ASF) under one or more
<del># contributor license agreements. See the NOTICE file distributed with
<del># this work for additional information regarding copyright ownership.
<del># The ASF licenses this file to You under the Apache License, Version 2.0
<del># (the "License"); you may not use this file except in compliance with
<del># the License. You may obtain a copy of the License at
<del>#
<del># http://www.apache.org/licenses/LICENSE-2.0
<del>#
<del># Unless required by applicable law or agreed to in writing, software
<del># distributed under the License is distributed on an "AS IS" BASIS,
<del># WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
<del># See the License for the specific language governing permissions and
<del># limitations under the License.
<del>"""
<del>Cisco CCS Driver
<del>"""
<del>
<del>from libcloud.compute.providers import Provider
<del>from libcloud.common.dimensiondata import (DimensionDataConnection,
<del> API_ENDPOINTS)
<del>from libcloud.compute.drivers.dimensiondata import DimensionDataNodeDriver
<del>
<del>DEFAULT_REGION = 'cisco-na'
<del>
<del>
<del>class CiscoCCSNodeDriver(DimensionDataNodeDriver):
<del> """
<del> Cisco CCS node driver, based on Dimension Data driver
<del> """
<del>
<del> selected_region = None
<del> connectionCls = DimensionDataConnection
<del> name = 'CiscoCCS'
<del> website = 'http://www.cisco.com/'
<del> type = Provider.CISCOCCS
<del> features = {'create_node': ['password']}
<del> api_version = 1.0
<del>
<del> def __init__(self, key, secret=None, secure=True, host=None, port=None,
<del> api_version=None, region=DEFAULT_REGION, **kwargs):
<del>
<del> if region not in API_ENDPOINTS:
<del> raise ValueError('Invalid region: %s' % (region))
<del>
<del> self.selected_region = API_ENDPOINTS[region]
<del>
<del> super(CiscoCCSNodeDriver, self).__init__(
<del> key=key,
<del> secret=secret,
<del> secure=secure,
<del> host=host,
<del> port=port,
<del> api_version=api_version,
<del> region=region,
<del> **kwargs)
<ide><path>libcloud/compute/providers.py
<ide> ('libcloud.compute.drivers.medone', 'MedOneNodeDriver'),
<ide> Provider.BSNL:
<ide> ('libcloud.compute.drivers.bsnl', 'BSNLNodeDriver'),
<del> Provider.CISCOCCS:
<del> ('libcloud.compute.drivers.ciscoccs', 'CiscoCCSNodeDriver'),
<ide> Provider.NTTA:
<ide> ('libcloud.compute.drivers.ntta', 'NTTAmericaNodeDriver'),
<ide> Provider.ALIYUN_ECS: | 2 |
Text | Text | update changelog for 16.14, 15.7, 0.14.10 | b5eabd543f4a6c9d950f1bb5ec73da5a1c7c5caa | <ide><path>CHANGELOG.md
<add>## 16.14.0 (October 14, 2020)
<add>
<add>### React
<add>
<add>* Add support for the [new JSX transform](https://reactjs.org/blog/2020/09/22/introducing-the-new-jsx-transform.html). ([@lunaruan](https://github.com/lunaruan) in [#18299](https://github.com/facebook/react/pull/18299))
<add>
<ide> ## 16.13.1 (March 19, 2020)
<ide>
<ide> ### React DOM
<ide> Starting with 16.1.0, we will no longer be publishing new releases on Bower. You
<ide> - There is no `react-with-addons.js` build anymore. All compatible addons are published separately on npm, and have single-file browser versions if you need them.
<ide> - The deprecations introduced in 15.x have been removed from the core package. `React.createClass` is now available as create-react-class, `React.PropTypes` as prop-types, `React.DOM` as react-dom-factories, react-addons-test-utils as react-dom/test-utils, and shallow renderer as react-test-renderer/shallow. See [15.5.0](https://reactjs.org/blog/2017/04/07/react-v15.5.0.html) and [15.6.0](https://reactjs.org/blog/2017/06/13/react-v15.6.0.html) blog posts for instructions on migrating code and automated codemods.
<ide>
<add>## 15.7.0 (October 14, 2020)
<add>
<add>### React
<add>
<add>* Backport support for the [new JSX transform](https://reactjs.org/blog/2020/09/22/introducing-the-new-jsx-transform.html) to 15.x. ([@lunaruan](https://github.com/lunaruan) in [#18299](https://github.com/facebook/react/pull/18299) and [@gaearon](https://github.com/gaearon) in [#20024](https://github.com/facebook/react/pull/20024))
<add>
<ide> ## 15.6.2 (September 25, 2017)
<ide>
<ide> ### All Packages
<ide> Each of these changes will continue to work as before with a new warning until t
<ide> - React DOM adds a new `suppressContentEditableWarning` prop for components like [Draft.js](https://draftjs.org/) that intentionally manage `contentEditable` children with React. ([@mxstbr](https://github.com/mxstbr) in [#6112](https://github.com/facebook/react/pull/6112))
<ide> - React improves the performance for `createClass()` on complex specs. ([@sophiebits](https://github.com/sophiebits) in [#5550](https://github.com/facebook/react/pull/5550))
<ide>
<add>## 0.14.10 (October 14, 2020)
<add>
<add>### React
<add>
<add>* Backport support for the [new JSX transform](https://reactjs.org/blog/2020/09/22/introducing-the-new-jsx-transform.html) to 0.14.x. ([@lunaruan](https://github.com/lunaruan) in [#18299](https://github.com/facebook/react/pull/18299) and [@gaearon](https://github.com/gaearon) in [#20024](https://github.com/facebook/react/pull/20024))
<ide>
<ide> ## 0.14.8 (March 29, 2016)
<ide> | 1 |
Ruby | Ruby | apply suggestions from code review | 1d9ee93e6ecdd85eb9c763c83ab7dca3ca3427b6 | <ide><path>Library/Homebrew/extend/ENV/std.rb
<ide> def setup_build_environment(formula: nil, cc: nil, build_bottle: false, bottle_a
<ide>
<ide> begin
<ide> send(compiler)
<del> rescue CompilerSelectionError => e
<add> rescue CompilerSelectionError
<ide> # We don't care if our compiler fails to build the formula during `brew test`.
<del> raise e unless testing_formula
<add> raise unless testing_formula
<ide>
<ide> send(DevelopmentTools.default_compiler)
<ide> end | 1 |
Go | Go | fix build after merge of outdated pr | 7e8f7efdd15c0fe333cb657b07fdbf986e022a2c | <ide><path>builder/dockerfile/copy.go
<ide> func copyFile(archiver Archiver, source, dest *copyEndpoint, identity *idtools.I
<ide> // are of the form \\?\Volume{<GUID>}\<path>. An example would be:
<ide> // \\?\Volume{dae8d3ac-b9a1-11e9-88eb-e8554b2ba1db}\bin\busybox.exe
<ide>
<del> if err := system.MkdirAll(filepath.Dir(dest.path), 0755, ""); err != nil {
<add> if err := system.MkdirAll(filepath.Dir(dest.path), 0755); err != nil {
<ide> return err
<ide> }
<ide> } else { | 1 |
Java | Java | implement the 'using' operator | 2eded0a0ac73af0bccdbd256f0460d6a63ecfe41 | <ide><path>rxjava-core/src/main/java/rx/Observable.java
<ide> import rx.operators.OperationToObservableIterable;
<ide> import rx.operators.OperationToObservableList;
<ide> import rx.operators.OperationToObservableSortedList;
<add>import rx.operators.OperationUsing;
<ide> import rx.operators.OperationWindow;
<ide> import rx.operators.OperationZip;
<ide> import rx.operators.SafeObservableSubscription;
<ide> public Observable<TimeInterval<T>> timeInterval(Scheduler scheduler) {
<ide> return create(OperationTimeInterval.timeInterval(this, scheduler));
<ide> }
<ide>
<add> /**
<add> * Constructs an observable sequence that depends on a resource object.
<add> *
<add> * @param resourceFactory
<add> * The factory function to obtain a resource object.
<add> * @param observableFactory
<add> * The factory function to obtain an observable sequence that depends on the obtained resource.
<add> * @return
<add> * The observable sequence whose lifetime controls the lifetime of the dependent resource object.
<add> * @see <a href="http://msdn.microsoft.com/en-us/library/hh229585(v=vs.103).aspx">MSDN: Observable.Using</a>
<add> */
<add> public static <T, RESOURCE extends Subscription> Observable<T> using(Func0<RESOURCE> resourceFactory, Func1<RESOURCE, Observable<T>> observableFactory) {
<add> return create(OperationUsing.using(resourceFactory, observableFactory));
<add> }
<add>
<ide> /**
<ide> * Propagates the observable sequence that reacts first.
<ide> *
<ide><path>rxjava-core/src/main/java/rx/operators/OperationUsing.java
<add>/**
<add> * Copyright 2013 Netflix, Inc.
<add> *
<add> * Licensed under the Apache License, Version 2.0 (the "License");
<add> * you may not use this file except in compliance with the License.
<add> * You may obtain a copy of the License at
<add> *
<add> * http://www.apache.org/licenses/LICENSE-2.0
<add> *
<add> * Unless required by applicable law or agreed to in writing, software
<add> * distributed under the License is distributed on an "AS IS" BASIS,
<add> * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
<add> * See the License for the specific language governing permissions and
<add> * limitations under the License.
<add> */
<add>package rx.operators;
<add>
<add>import rx.Observable;
<add>import rx.Observable.OnSubscribeFunc;
<add>import rx.Observer;
<add>import rx.Subscription;
<add>import rx.subscriptions.CompositeSubscription;
<add>import rx.subscriptions.Subscriptions;
<add>import rx.util.functions.Func0;
<add>import rx.util.functions.Func1;
<add>
<add>/**
<add> * Constructs an observable sequence that depends on a resource object.
<add> */
<add>public class OperationUsing {
<add>
<add> public static <T, RESOURCE extends Subscription> OnSubscribeFunc<T> using(
<add> final Func0<RESOURCE> resourceFactory,
<add> final Func1<RESOURCE, Observable<T>> observableFactory) {
<add> return new OnSubscribeFunc<T>() {
<add> @Override
<add> public Subscription onSubscribe(Observer<? super T> observer) {
<add> Subscription resourceSubscription = Subscriptions.empty();
<add> try {
<add> RESOURCE resource = resourceFactory.call();
<add> if (resource != null) {
<add> resourceSubscription = resource;
<add> }
<add> Observable<T> observable = observableFactory.call(resource);
<add> SafeObservableSubscription subscription = new SafeObservableSubscription();
<add> // Use SafeObserver to guarantee resourceSubscription will
<add> // be unsubscribed.
<add> return subscription.wrap(new CompositeSubscription(
<add> observable.subscribe(new SafeObserver<T>(
<add> subscription, observer)),
<add> resourceSubscription));
<add> } catch (Throwable e) {
<add> resourceSubscription.unsubscribe();
<add> return Observable.<T> error(e).subscribe(observer);
<add> }
<add> }
<add> };
<add> }
<add>}
<ide><path>rxjava-core/src/test/java/rx/operators/OperationUsingTest.java
<add>/**
<add> * Copyright 2013 Netflix, Inc.
<add> *
<add> * Licensed under the Apache License, Version 2.0 (the "License");
<add> * you may not use this file except in compliance with the License.
<add> * You may obtain a copy of the License at
<add> *
<add> * http://www.apache.org/licenses/LICENSE-2.0
<add> *
<add> * Unless required by applicable law or agreed to in writing, software
<add> * distributed under the License is distributed on an "AS IS" BASIS,
<add> * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
<add> * See the License for the specific language governing permissions and
<add> * limitations under the License.
<add> */
<add>package rx.operators;
<add>
<add>import static org.junit.Assert.fail;
<add>import static org.mockito.Mockito.inOrder;
<add>import static org.mockito.Mockito.mock;
<add>import static org.mockito.Mockito.times;
<add>import static org.mockito.Mockito.verify;
<add>import static org.mockito.Mockito.when;
<add>import static rx.operators.OperationUsing.using;
<add>
<add>import org.junit.Test;
<add>import org.mockito.InOrder;
<add>
<add>import rx.Observable;
<add>import rx.Observable.OnSubscribeFunc;
<add>import rx.Observer;
<add>import rx.Subscription;
<add>import rx.subscriptions.Subscriptions;
<add>import rx.util.functions.Action0;
<add>import rx.util.functions.Func0;
<add>import rx.util.functions.Func1;
<add>
<add>public class OperationUsingTest {
<add>
<add> @SuppressWarnings("serial")
<add> private static class TestException extends RuntimeException {
<add> }
<add>
<add> private static interface Resource extends Subscription {
<add> public String getTextFromWeb();
<add>
<add> @Override
<add> public void unsubscribe();
<add> }
<add>
<add> @Test
<add> public void testUsing() {
<add> final Resource resource = mock(Resource.class);
<add> when(resource.getTextFromWeb()).thenReturn("Hello world!");
<add>
<add> Func0<Resource> resourceFactory = new Func0<Resource>() {
<add> @Override
<add> public Resource call() {
<add> return resource;
<add> }
<add> };
<add>
<add> Func1<Resource, Observable<String>> observableFactory = new Func1<Resource, Observable<String>>() {
<add> @Override
<add> public Observable<String> call(Resource resource) {
<add> return Observable.from(resource.getTextFromWeb().split(" "));
<add> }
<add> };
<add>
<add> @SuppressWarnings("unchecked")
<add> Observer<String> observer = (Observer<String>) mock(Observer.class);
<add> Observable<String> observable = Observable.create(using(
<add> resourceFactory, observableFactory));
<add> observable.subscribe(observer);
<add>
<add> InOrder inOrder = inOrder(observer);
<add> inOrder.verify(observer, times(1)).onNext("Hello");
<add> inOrder.verify(observer, times(1)).onNext("world!");
<add> inOrder.verify(observer, times(1)).onCompleted();
<add> inOrder.verifyNoMoreInteractions();
<add>
<add> // The resource should be closed
<add> verify(resource, times(1)).unsubscribe();
<add> }
<add>
<add> @Test
<add> public void testUsingWithSubscribingTwice() {
<add> // When subscribe is called, a new resource should be created.
<add> Func0<Resource> resourceFactory = new Func0<Resource>() {
<add> @Override
<add> public Resource call() {
<add> return new Resource() {
<add>
<add> boolean first = true;
<add>
<add> @Override
<add> public String getTextFromWeb() {
<add> if (first) {
<add> first = false;
<add> return "Hello world!";
<add> }
<add> return "Nothing";
<add> }
<add>
<add> @Override
<add> public void unsubscribe() {
<add> }
<add>
<add> };
<add> }
<add> };
<add>
<add> Func1<Resource, Observable<String>> observableFactory = new Func1<Resource, Observable<String>>() {
<add> @Override
<add> public Observable<String> call(Resource resource) {
<add> return Observable.from(resource.getTextFromWeb().split(" "));
<add> }
<add> };
<add>
<add> @SuppressWarnings("unchecked")
<add> Observer<String> observer = (Observer<String>) mock(Observer.class);
<add> Observable<String> observable = Observable.create(using(
<add> resourceFactory, observableFactory));
<add> observable.subscribe(observer);
<add> observable.subscribe(observer);
<add>
<add> InOrder inOrder = inOrder(observer);
<add>
<add> inOrder.verify(observer, times(1)).onNext("Hello");
<add> inOrder.verify(observer, times(1)).onNext("world!");
<add> inOrder.verify(observer, times(1)).onCompleted();
<add>
<add> inOrder.verify(observer, times(1)).onNext("Hello");
<add> inOrder.verify(observer, times(1)).onNext("world!");
<add> inOrder.verify(observer, times(1)).onCompleted();
<add> inOrder.verifyNoMoreInteractions();
<add> }
<add>
<add> @Test(expected = TestException.class)
<add> public void testUsingWithResourceFactoryError() {
<add> Func0<Subscription> resourceFactory = new Func0<Subscription>() {
<add> @Override
<add> public Subscription call() {
<add> throw new TestException();
<add> }
<add> };
<add>
<add> Func1<Subscription, Observable<Integer>> observableFactory = new Func1<Subscription, Observable<Integer>>() {
<add> @Override
<add> public Observable<Integer> call(Subscription subscription) {
<add> return Observable.empty();
<add> }
<add> };
<add>
<add> Observable.create(using(resourceFactory, observableFactory))
<add> .toBlockingObservable().last();
<add> }
<add>
<add> @Test
<add> public void testUsingWithObservableFactoryError() {
<add> final Action0 unsubscribe = mock(Action0.class);
<add> Func0<Subscription> resourceFactory = new Func0<Subscription>() {
<add> @Override
<add> public Subscription call() {
<add> return Subscriptions.create(unsubscribe);
<add> }
<add> };
<add>
<add> Func1<Subscription, Observable<Integer>> observableFactory = new Func1<Subscription, Observable<Integer>>() {
<add> @Override
<add> public Observable<Integer> call(Subscription subscription) {
<add> throw new TestException();
<add> }
<add> };
<add>
<add> try {
<add> Observable.create(using(resourceFactory, observableFactory))
<add> .toBlockingObservable().last();
<add> fail("Should throw a TestException when the observableFactory throws it");
<add> } catch (TestException e) {
<add> // Make sure that unsubscribe is called so that users can close
<add> // the resource if some error happens.
<add> verify(unsubscribe, times(1)).call();
<add> }
<add> }
<add>
<add> @Test
<add> public void testUsingWithObservableFactoryErrorInOnSubscribe() {
<add> final Action0 unsubscribe = mock(Action0.class);
<add> Func0<Subscription> resourceFactory = new Func0<Subscription>() {
<add> @Override
<add> public Subscription call() {
<add> return Subscriptions.create(unsubscribe);
<add> }
<add> };
<add>
<add> Func1<Subscription, Observable<Integer>> observableFactory = new Func1<Subscription, Observable<Integer>>() {
<add> @Override
<add> public Observable<Integer> call(Subscription subscription) {
<add> return Observable.create(new OnSubscribeFunc<Integer>() {
<add> @Override
<add> public Subscription onSubscribe(Observer<? super Integer> t1) {
<add> throw new TestException();
<add> }
<add> });
<add> }
<add> };
<add>
<add> try {
<add> Observable.create(using(resourceFactory, observableFactory))
<add> .toBlockingObservable().last();
<add> fail("Should throw a TestException when the observableFactory throws it");
<add> } catch (TestException e) {
<add> // Make sure that unsubscribe is called so that users can close
<add> // the resource if some error happens.
<add> verify(unsubscribe, times(1)).call();
<add> }
<add> }
<add>} | 3 |
Python | Python | handle empty arrays in roll | be90de8e31b76db8154e8ab9cefc21c1d25d0f45 | <ide><path>numpy/core/numeric.py
<ide> def roll(a, shift, axis=None):
<ide> except IndexError:
<ide> raise ValueError('axis must be >= 0 and < %d' % a.ndim)
<ide> reshape = False
<del> shift %= n
<del> indexes = concatenate((arange(n-shift,n),arange(n-shift)))
<del> res = a.take(indexes, axis)
<del> if reshape:
<del> return res.reshape(a.shape)
<add> if n == 0:
<add> return a
<ide> else:
<del> return res
<add> shift %= n
<add> indexes = concatenate((arange(n-shift,n),arange(n-shift)))
<add> res = a.take(indexes, axis)
<add> if reshape:
<add> return res.reshape(a.shape)
<add> else:
<add> return res
<ide>
<ide> def rollaxis(a, axis, start=0):
<ide> """ | 1 |
Python | Python | rewrite np.ma.(median|sort) to use take_along_axis | 7a3c50ab427cd9c4f3125a8e72d31f9141bd558a | <ide><path>numpy/ma/core.py
<ide> def sort(self, axis=-1, kind='quicksort', order=None,
<ide> sidx = self.argsort(axis=axis, kind=kind, order=order,
<ide> fill_value=fill_value, endwith=endwith)
<ide>
<del> # save memory for 1d arrays
<del> if self.ndim == 1:
<del> idx = sidx
<del> else:
<del> idx = list(np.ix_(*[np.arange(x) for x in self.shape]))
<del> idx[axis] = sidx
<del> idx = tuple(idx)
<del>
<del> self[...] = self[idx]
<add> self[...] = np.take_along_axis(self, sidx, axis=axis)
<ide>
<ide> def min(self, axis=None, out=None, fill_value=None, keepdims=np._NoValue):
<ide> """
<ide><path>numpy/ma/extras.py
<ide> def _median(a, axis=None, out=None, overwrite_input=False):
<ide> return np.ma.minimum_fill_value(asorted)
<ide> return s
<ide>
<del> counts = count(asorted, axis=axis)
<add> counts = count(asorted, axis=axis, keepdims=True)
<ide> h = counts // 2
<ide>
<del> # create indexing mesh grid for all but reduced axis
<del> axes_grid = [np.arange(x) for i, x in enumerate(asorted.shape)
<del> if i != axis]
<del> ind = np.meshgrid(*axes_grid, sparse=True, indexing='ij')
<add> # duplicate high if odd number of elements so mean does nothing
<add> odd = counts % 2 == 1
<add> l = np.where(odd, h, h-1)
<ide>
<del> # insert indices of low and high median
<del> ind.insert(axis, h - 1)
<del> low = asorted[tuple(ind)]
<del> ind[axis] = np.minimum(h, asorted.shape[axis] - 1)
<del> high = asorted[tuple(ind)]
<add> lh = np.concatenate([l,h], axis=axis)
<add>
<add> # get low and high median
<add> low_high = np.take_along_axis(asorted, lh, axis=axis)
<ide>
<ide> def replace_masked(s):
<ide> # Replace masked entries with minimum_full_value unless it all values
<ide> # are masked. This is required as the sort order of values equal or
<ide> # larger than the fill value is undefined and a valid value placed
<ide> # elsewhere, e.g. [4, --, inf].
<ide> if np.ma.is_masked(s):
<del> rep = (~np.all(asorted.mask, axis=axis)) & s.mask
<add> rep = (~np.all(asorted.mask, axis=axis, keepdims=True)) & s.mask
<ide> s.data[rep] = np.ma.minimum_fill_value(asorted)
<ide> s.mask[rep] = False
<ide>
<del> replace_masked(low)
<del> replace_masked(high)
<del>
<del> # duplicate high if odd number of elements so mean does nothing
<del> odd = counts % 2 == 1
<del> np.copyto(low, high, where=odd)
<del> # not necessary for scalar True/False masks
<del> try:
<del> np.copyto(low.mask, high.mask, where=odd)
<del> except Exception:
<del> pass
<add> replace_masked(low_high)
<ide>
<ide> if np.issubdtype(asorted.dtype, np.inexact):
<ide> # avoid inf / x = masked
<del> s = np.ma.sum([low, high], axis=0, out=out)
<add> s = np.ma.sum(low_high, axis=axis, out=out)
<ide> np.true_divide(s.data, 2., casting='unsafe', out=s.data)
<ide>
<ide> s = np.lib.utils._median_nancheck(asorted, s, axis, out)
<ide> else:
<del> s = np.ma.mean([low, high], axis=0, out=out)
<add> s = np.ma.mean(low_high, axis=axis, out=out)
<ide>
<ide> return s
<ide> | 2 |
Python | Python | remove some bits from utils | 6af75d3a69c486b19adc6e2da00719094778eb31 | <ide><path>djangorestframework/response.py
<ide> from Internet Explorer user agents and use a sensible browser `Accept` header instead.
<ide> """
<ide>
<add>
<add>import re
<ide> from django.template.response import SimpleTemplateResponse
<ide> from django.core.handlers.wsgi import STATUS_CODE_TEXT
<del>
<ide> from djangorestframework.settings import api_settings
<ide> from djangorestframework.utils.mediatypes import order_by_precedence
<del>from djangorestframework.utils import MSIE_USER_AGENT_REGEX
<ide> from djangorestframework import status
<ide>
<ide>
<add>MSIE_USER_AGENT_REGEX = re.compile(r'^Mozilla/[0-9]+\.[0-9]+ \([^)]*; MSIE [0-9]+\.[0-9]+[a-z]?;[^)]*\)(?!.* Opera )')
<add>
<add>
<ide> class NotAcceptable(Exception):
<ide> pass
<ide>
<ide><path>djangorestframework/utils/__init__.py
<ide> from django.utils.encoding import smart_unicode
<ide> from django.utils.xmlutils import SimplerXMLGenerator
<del>from django.core.urlresolvers import resolve
<ide>
<ide> from djangorestframework.compat import StringIO
<ide> from djangorestframework.compat import RequestFactory as DjangoRequestFactory
<ide> import re
<ide> import xml.etree.ElementTree as ET
<ide>
<del>MSIE_USER_AGENT_REGEX = re.compile(r'^Mozilla/[0-9]+\.[0-9]+ \([^)]*; MSIE [0-9]+\.[0-9]+[a-z]?;[^)]*\)(?!.* Opera )')
<del>
<del>
<del>def as_tuple(obj):
<del> """
<del> Given an object which may be a list/tuple, another object, or None,
<del> return that object in list form.
<del>
<del> IE:
<del> If the object is already a list/tuple just return it.
<del> If the object is not None, return it in a list with a single element.
<del> If the object is None return an empty list.
<del> """
<del> if obj is None:
<del> return ()
<del> elif isinstance(obj, list):
<del> return tuple(obj)
<del> elif isinstance(obj, tuple):
<del> return obj
<del> return (obj,)
<del>
<del>
<del>def url_resolves(url):
<del> """
<del> Return True if the given URL is mapped to a view in the urlconf, False otherwise.
<del> """
<del> try:
<del> resolve(url)
<del> except Exception:
<del> return False
<del> return True
<del>
<ide>
<ide> # From xml2dict
<ide> class XML2Dict(object):
<ide><path>djangorestframework/utils/breadcrumbs.py
<ide> def breadcrumbs_recursive(url, breadcrumbs_list):
<ide> return breadcrumbs_recursive(url[:url.rfind('/') + 1], breadcrumbs_list)
<ide>
<ide> return breadcrumbs_recursive(url, [])
<del> | 3 |
PHP | PHP | apply fixes from styleci | 68e7ad761cbe9ff73ebbecbdbc8fa3e0a97311f9 | <ide><path>src/Illuminate/Database/Eloquent/Model.php
<ide> protected function incrementOrDecrement($column, $amount, $extra, $method)
<ide>
<ide> if ($this->fireModelEvent('updating') === false) {
<ide> return false;
<del> };
<add> }
<ide>
<ide> return tap($query->where(
<ide> $this->getKeyName(), $this->getKey() | 1 |
Text | Text | add jabortell to the contributors | 992723dfacc19aaec9b53de1f7250569fe089065 | <ide><path>.github/contributors/jabortell.md
<add># spaCy contributor agreement
<add>
<add>This spaCy Contributor Agreement (**"SCA"**) is based on the
<add>[Oracle Contributor Agreement](http://www.oracle.com/technetwork/oca-405177.pdf).
<add>The SCA applies to any contribution that you make to any product or project
<add>managed by us (the **"project"**), and sets out the intellectual property rights
<add>you grant to us in the contributed materials. The term **"us"** shall mean
<add>[ExplosionAI GmbH](https://explosion.ai/legal). The term
<add>**"you"** shall mean the person or entity identified below.
<add>
<add>If you agree to be bound by these terms, fill in the information requested
<add>below and include the filled-in version with your first pull request, under the
<add>folder [`.github/contributors/`](/.github/contributors/). The name of the file
<add>should be your GitHub username, with the extension `.md`. For example, the user
<add>example_user would create the file `.github/contributors/example_user.md`.
<add>
<add>Read this agreement carefully before signing. These terms and conditions
<add>constitute a binding legal agreement.
<add>
<add>## Contributor Agreement
<add>
<add>1. The term "contribution" or "contributed materials" means any source code,
<add>object code, patch, tool, sample, graphic, specification, manual,
<add>documentation, or any other material posted or submitted by you to the project.
<add>
<add>2. With respect to any worldwide copyrights, or copyright applications and
<add>registrations, in your contribution:
<add>
<add> * you hereby assign to us joint ownership, and to the extent that such
<add> assignment is or becomes invalid, ineffective or unenforceable, you hereby
<add> grant to us a perpetual, irrevocable, non-exclusive, worldwide, no-charge,
<add> royalty-free, unrestricted license to exercise all rights under those
<add> copyrights. This includes, at our option, the right to sublicense these same
<add> rights to third parties through multiple levels of sublicensees or other
<add> licensing arrangements;
<add>
<add> * you agree that each of us can do all things in relation to your
<add> contribution as if each of us were the sole owners, and if one of us makes
<add> a derivative work of your contribution, the one who makes the derivative
<add> work (or has it made will be the sole owner of that derivative work;
<add>
<add> * you agree that you will not assert any moral rights in your contribution
<add> against us, our licensees or transferees;
<add>
<add> * you agree that we may register a copyright in your contribution and
<add> exercise all ownership rights associated with it; and
<add>
<add> * you agree that neither of us has any duty to consult with, obtain the
<add> consent of, pay or render an accounting to the other for any use or
<add> distribution of your contribution.
<add>
<add>3. With respect to any patents you own, or that you can license without payment
<add>to any third party, you hereby grant to us a perpetual, irrevocable,
<add>non-exclusive, worldwide, no-charge, royalty-free license to:
<add>
<add> * make, have made, use, sell, offer to sell, import, and otherwise transfer
<add> your contribution in whole or in part, alone or in combination with or
<add> included in any product, work or materials arising out of the project to
<add> which your contribution was submitted, and
<add>
<add> * at our option, to sublicense these same rights to third parties through
<add> multiple levels of sublicensees or other licensing arrangements.
<add>
<add>4. Except as set out above, you keep all right, title, and interest in your
<add>contribution. The rights that you grant to us under these terms are effective
<add>on the date you first submitted a contribution to us, even if your submission
<add>took place before the date you sign these terms.
<add>
<add>5. You covenant, represent, warrant and agree that:
<add>
<add> * Each contribution that you submit is and shall be an original work of
<add> authorship and you can legally grant the rights set out in this SCA;
<add>
<add> * to the best of your knowledge, each contribution will not violate any
<add> third party's copyrights, trademarks, patents, or other intellectual
<add> property rights; and
<add>
<add> * each contribution shall be in compliance with U.S. export control laws and
<add> other applicable export and import laws. You agree to notify us if you
<add> become aware of any circumstance which would make any of the foregoing
<add> representations inaccurate in any respect. We may publicly disclose your
<add> participation in the project, including the fact that you have signed the SCA.
<add>
<add>6. This SCA is governed by the laws of the State of California and applicable
<add>U.S. Federal law. Any choice of law rules will not apply.
<add>
<add>7. Please place an “x” on one of the applicable statement below. Please do NOT
<add>mark both statements:
<add>
<add> * [x] I am signing on behalf of myself as an individual and no other person
<add> or entity, including my employer, has or will have rights with respect to my
<add> contributions.
<add>
<add> * [ ] I am signing on behalf of my employer or a legal entity and I have the
<add> actual authority to contractually bind that entity.
<add>
<add>## Contributor Details
<add>
<add>| Field | Entry |
<add>|------------------------------- | -------------------- |
<add>| Name | Jacob Bortell |
<add>| Company name (if applicable) | |
<add>| Title or role (if applicable) | |
<add>| Date | 2020-11-20 |
<add>| GitHub username | jabortell |
<add>| Website (optional) | | | 1 |
Python | Python | compare 'tolist' function to 'list' in example | ab90c876b0fead1b923b1616f8f50a5fded83de6 | <ide><path>numpy/core/_add_newdocs.py
<ide>
<ide> Examples
<ide> --------
<del> For a 1D array, ``a.tolist()`` is almost the same as ``list(a)``:
<add> For a 1D array, ``a.tolist()`` is almost the same as ``list(a)``, except that it changes numpy scalars to Python scalars:
<ide>
<del> >>> a = np.array([1, 2])
<del> >>> list(a)
<add> >>> a = np.uint32([1, 2])
<add> >>> a_list = list(a)
<add> >>> a_list
<ide> [1, 2]
<del> >>> a.tolist()
<add> >>> type(a_list)
<add> <class 'list'>
<add> >>> type(a_list[0])
<add> <class 'numpy.uint32'>
<add> >>> a_tolist = a.tolist()
<add> >>> a_tolist
<ide> [1, 2]
<add> >>> type(a_tolist)
<add> <class 'list'>
<add> >>> type(a_tolist[0])
<add> <class 'int'>
<ide>
<ide> However, for a 2D array, ``tolist`` applies recursively:
<ide> | 1 |
Go | Go | move "pause" to daemon/pause.go | 42a77de3d3867e2f34b455fc343e293eed994dcd | <ide><path>daemon/daemon.go
<ide> func (daemon *Daemon) Install(eng *engine.Engine) error {
<ide> if err := eng.Register("attach", daemon.ContainerAttach); err != nil {
<ide> return err
<ide> }
<add> if err := eng.Register("pause", daemon.ContainerPause); err != nil {
<add> return err
<add> }
<ide> return nil
<ide> }
<ide>
<ide><path>daemon/pause.go
<add>package daemon
<add>
<add>import (
<add> "github.com/docker/docker/engine"
<add>)
<add>
<add>func (daemon *Daemon) ContainerPause(job *engine.Job) engine.Status {
<add> if len(job.Args) != 1 {
<add> return job.Errorf("Usage: %s CONTAINER", job.Name)
<add> }
<add> name := job.Args[0]
<add> container := daemon.Get(name)
<add> if container == nil {
<add> return job.Errorf("No such container: %s", name)
<add> }
<add> if err := container.Pause(); err != nil {
<add> return job.Errorf("Cannot pause container %s: %s", name, err)
<add> }
<add> job.Eng.Job("log", "pause", container.ID, daemon.Repositories().ImageName(container.Image)).Run()
<add> return engine.StatusOK
<add>}
<ide><path>server/container.go
<ide> import (
<ide> "github.com/docker/docker/utils"
<ide> )
<ide>
<del>func (srv *Server) ContainerPause(job *engine.Job) engine.Status {
<del> if len(job.Args) != 1 {
<del> return job.Errorf("Usage: %s CONTAINER", job.Name)
<del> }
<del> name := job.Args[0]
<del> container := srv.daemon.Get(name)
<del> if container == nil {
<del> return job.Errorf("No such container: %s", name)
<del> }
<del> if err := container.Pause(); err != nil {
<del> return job.Errorf("Cannot pause container %s: %s", name, err)
<del> }
<del> srv.LogEvent("pause", container.ID, srv.daemon.Repositories().ImageName(container.Image))
<del> return engine.StatusOK
<del>}
<del>
<ide> func (srv *Server) ContainerUnpause(job *engine.Job) engine.Status {
<ide> if n := len(job.Args); n < 1 || n > 2 {
<ide> return job.Errorf("Usage: %s CONTAINER", job.Name)
<ide> func (srv *Server) ContainerLogs(job *engine.Job) engine.Status {
<ide> return engine.StatusOK
<ide> }
<ide>
<del>
<ide> func (srv *Server) ContainerCopy(job *engine.Job) engine.Status {
<ide> if len(job.Args) != 2 {
<ide> return job.Errorf("Usage: %s CONTAINER RESOURCE\n", job.Name)
<ide><path>server/init.go
<ide> func InitServer(job *engine.Job) engine.Status {
<ide> "restart": srv.ContainerRestart,
<ide> "start": srv.ContainerStart,
<ide> "kill": srv.ContainerKill,
<del> "pause": srv.ContainerPause,
<ide> "unpause": srv.ContainerUnpause,
<ide> "wait": srv.ContainerWait,
<ide> "tag": srv.ImageTag, // FIXME merge with "image_tag" | 4 |
Ruby | Ruby | fix layout method doc formatting | 23c4efbb5b8804bc029423ef620c1e38914e1565 | <ide><path>actionpack/lib/abstract_controller/layouts.rb
<ide> def conditional_layout?
<ide> #
<ide> # If the specified layout is a:
<ide> # String:: the String is the template name
<del> # Symbol:: call the method specified by the symbol, which will return
<del> # the template name
<add> # Symbol:: call the method specified by the symbol, which will return the template name
<ide> # false:: There is no layout
<ide> # true:: raise an ArgumentError
<ide> # | 1 |
Ruby | Ruby | fix a comment in atom_feed_helper.rb | 85119f5909db86167b8b4ed6916b5dda6d9462d5 | <ide><path>actionview/lib/action_view/helpers/atom_feed_helper.rb
<ide> def initialize(xml)
<ide> end
<ide>
<ide> private
<del> # Delegate to xml builder, first wrapping the element in a xhtml
<add> # Delegate to xml builder, first wrapping the element in an xhtml
<ide> # namespaced div element if the method and arguments indicate
<ide> # that an xhtml_block? is desired.
<ide> def method_missing(method, *arguments, &block) | 1 |
Javascript | Javascript | add a hook for adding script attributes | d4db20aaccb3f2a971736a773089e4278ca35f95 | <ide><path>lib/JsonpMainTemplatePlugin.js
<ide> JsonpMainTemplatePlugin.prototype.apply = function(mainTemplate) {
<ide> }
<ide> return source;
<ide> });
<del> mainTemplate.plugin("require-ensure", function(_, chunk, hash) {
<add> mainTemplate.plugin("jsonp-script", function(_, chunk, hash) {
<ide> var filename = this.outputOptions.filename;
<ide> var chunkFilename = this.outputOptions.chunkFilename;
<ide> var chunkMaps = chunk.getChunkMaps();
<ide> var crossOriginLoading = this.outputOptions.crossOriginLoading;
<ide> var chunkLoadTimeout = this.outputOptions.chunkLoadTimeout || 120000;
<ide> return this.asString([
<del> "if(installedChunks[chunkId] === 0)",
<del> this.indent([
<del> "return Promise.resolve()"
<del> ]),
<del> "",
<del> "// an Promise means \"currently loading\".",
<del> "if(installedChunks[chunkId]) {",
<del> this.indent([
<del> "return installedChunks[chunkId][2];"
<del> ]),
<del> "}",
<del> "// start chunk loading",
<del> "var head = document.getElementsByTagName('head')[0];",
<ide> "var script = document.createElement('script');",
<ide> "script.type = 'text/javascript';",
<ide> "script.charset = 'utf-8';",
<ide> JsonpMainTemplatePlugin.prototype.apply = function(mainTemplate) {
<ide> "}"
<ide> ]),
<ide> "};",
<add> ]);
<add> });
<add> mainTemplate.plugin("require-ensure", function(_, chunk, hash) {
<add> var chunkFilename = this.outputOptions.chunkFilename;
<add> return this.asString([
<add> "if(installedChunks[chunkId] === 0)",
<add> this.indent([
<add> "return Promise.resolve()"
<add> ]),
<add> "",
<add> "// an Promise means \"currently loading\".",
<add> "if(installedChunks[chunkId]) {",
<add> this.indent([
<add> "return installedChunks[chunkId][2];"
<add> ]),
<add> "}",
<add> "// start chunk loading",
<add> "var head = document.getElementsByTagName('head')[0];",
<add> this.applyPluginsWaterfall("jsonp-script", "", chunk, hash),
<ide> "head.appendChild(script);",
<ide> "",
<ide> "var promise = new Promise(function(resolve, reject) {", | 1 |
Python | Python | add distilbertfortokenclassification import | 1806eabf59fee4a8a79a3a80a927cbb3d0fbde45 | <ide><path>transformers/__init__.py
<ide> ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP)
<ide> from .modeling_distilbert import (DistilBertForMaskedLM, DistilBertModel,
<ide> DistilBertForSequenceClassification, DistilBertForQuestionAnswering,
<add> DistilBertForTokenClassification,
<ide> DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP)
<ide> from .modeling_encoder_decoder import PreTrainedEncoderDecoder, Model2Model
<ide> | 1 |
Java | Java | increase timeout for stomp integration tests | 59fcf5014feac51687cad6222166356bda309147 | <ide><path>spring-messaging/src/test/java/org/springframework/messaging/simp/stomp/StompBrokerRelayMessageHandlerIntegrationTests.java
<ide> public void expectAvailabilityStatusChanges(Boolean... expected) {
<ide>
<ide> public void awaitAndAssert() throws InterruptedException {
<ide> synchronized(this.monitor) {
<del> long endTime = System.currentTimeMillis() + 5000;
<add> long endTime = System.currentTimeMillis() + 6000;
<ide> while (this.expected.size() != this.actual.size() && System.currentTimeMillis() < endTime) {
<ide> this.monitor.wait(500);
<ide> } | 1 |
PHP | PHP | fix default value handling in sqlserver | bb0ee7dfbeaf4ef5620c6316ad662d15f0e38d6e | <ide><path>src/Database/Schema/SqlserverSchema.php
<ide> public function convertColumnDescription(Table $table, $row)
<ide>
<ide> $field += [
<ide> 'null' => $row['null'] === '1' ? true : false,
<del> 'default' => $row['default'],
<add> 'default' => $this->_defaultValue($row['default']),
<ide> ];
<ide> $table->addColumn($row['name'], $field);
<ide> }
<ide>
<add> /**
<add> * Manipulate the default value.
<add> *
<add> * Sqlite includes quotes and bared NULLs in default values.
<add> * We need to remove those.
<add> *
<add> * @param string|null $default The default value.
<add> * @return string|null
<add> */
<add> protected function _defaultValue($default)
<add> {
<add> if ($default === 'NULL') {
<add> return null;
<add> }
<add>
<add> // Remove quotes
<add> if (preg_match("/^'(.*)'$/", $default, $matches)) {
<add> return str_replace("''", "'", $matches[1]);
<add> }
<add>
<add> return $default;
<add> }
<add>
<ide> /**
<ide> * {@inheritDoc}
<ide> */
<ide><path>tests/TestCase/Database/Schema/SqlserverSchemaTest.php
<ide> protected function _createTables($connection)
<ide> created DATETIME,
<ide> field1 VARCHAR(10) DEFAULT NULL,
<ide> field2 VARCHAR(10) DEFAULT 'NULL',
<add>field3 VARCHAR(10) DEFAULT 'O''hare',
<ide> CONSTRAINT [content_idx] UNIQUE ([title], [body]),
<ide> CONSTRAINT [author_idx] FOREIGN KEY ([author_id]) REFERENCES [schema_authors] ([id]) ON DELETE CASCADE ON UPDATE CASCADE
<ide> )
<ide> public function testDescribeTable()
<ide> 'fixed' => null,
<ide> 'comment' => null,
<ide> ],
<add> 'field3' => [
<add> 'type' => 'string',
<add> 'null' => true,
<add> 'default' => 'O\'hare',
<add> 'length' => 10,
<add> 'precision' => null,
<add> 'fixed' => null,
<add> 'comment' => null,
<add> ],
<ide> ];
<ide> $this->assertEquals(['id'], $result->primaryKey());
<ide> foreach ($expected as $field => $definition) { | 2 |
Ruby | Ruby | create check_broken_dependents method | 24e7f55a4c046463b3f2b8ae2c0383a67897c20d | <ide><path>Library/Homebrew/upgrade.rb
<ide> def upgrade_formula(f, args:)
<ide> end
<ide> private_class_method :upgrade_formula
<ide>
<del> def check_installed_dependents(args:)
<del> installed_formulae = FormulaInstaller.installed.to_a
<del> return if installed_formulae.empty?
<del>
<del> already_broken_dependents = CacheStoreDatabase.use(:linkage) do |db|
<add> def check_broken_dependents(installed_formulae)
<add> CacheStoreDatabase.use(:linkage) do |db|
<ide> installed_formulae.flat_map(&:runtime_installed_formula_dependents)
<ide> .uniq
<ide> .select do |f|
<ide> def check_installed_dependents(args:)
<ide> .broken_library_linkage?
<ide> end.compact
<ide> end
<add> end
<add>
<add> def check_installed_dependents(args:)
<add> installed_formulae = FormulaInstaller.installed.to_a
<add> return if installed_formulae.empty?
<add>
<add> already_broken_dependents = check_broken_dependents(installed_formulae)
<ide>
<ide> outdated_dependents =
<ide> installed_formulae.flat_map(&:runtime_installed_formula_dependents)
<ide> def check_installed_dependents(args:)
<ide>
<ide> # Assess the dependents tree again now we've upgraded.
<ide> oh1 "Checking for dependents of upgraded formulae..." unless args.dry_run?
<del> broken_dependents = CacheStoreDatabase.use(:linkage) do |db|
<del> installed_formulae.flat_map(&:runtime_installed_formula_dependents)
<del> .uniq
<del> .select do |f|
<del> keg = f.opt_or_installed_prefix_keg
<del> next unless keg
<del>
<del> LinkageChecker.new(keg, cache_db: db)
<del> .broken_library_linkage?
<del> end.compact
<del> end
<add> broken_dependents = check_broken_dependents(installed_formulae)
<ide> if broken_dependents.blank?
<ide> if args.dry_run?
<ide> ohai "No currently broken dependents found!" | 1 |
PHP | PHP | fix coding style | 9d8c11d5190e81b879f1b2bfdb4f4acfe9d94b88 | <ide><path>src/Illuminate/Foundation/Testing/TestResponse.php
<ide> public function assertJsonValidationErrors($errors)
<ide> }
<ide> }
<ide>
<del> if (!$hasError) {
<add> if (! $hasError) {
<ide> PHPUnit::fail(
<del> "Failed to find a validation error in the response for key and message: '$key' => '$value'" . PHP_EOL . PHP_EOL . $errorMessage
<add> "Failed to find a validation error in the response for key and message: '$key' => '$value'".PHP_EOL.PHP_EOL.$errorMessage
<ide> );
<ide> }
<ide> } | 1 |
Text | Text | add docs for container/image labels | 389eee1084ea7613fa56e5f6b3e24678bf9aebc2 | <ide><path>docs/sources/reference/api/docker_remote_api.md
<ide> to an image. For example you could add data describing the content of an image.
<ide> **New!**
<ide> Docker client now hints potential proxies about connection hijacking using HTTP Upgrade headers.
<ide>
<add>`POST /containers/create`
<add>
<add>**New!**
<add>You can set labels on container create describing the container.
<add>
<add>`GET /containers/json`
<add>
<add>**New!**
<add>This endpoint now returns the labels associated with each container (`Labels`).
<add>
<ide> `GET /containers/(id)/json`
<ide>
<ide> **New!**
<ide> This endpoint now returns the list current execs associated with the container (`ExecIDs`).
<add>This endpoint now returns the container labels (`Config.Labels`).
<ide>
<ide> `POST /containers/(id)/rename`
<ide>
<ide> root filesystem as read only.
<ide> **New!**
<ide> This endpoint returns a live stream of a container's resource usage statistics.
<ide>
<add>`GET /images/json`
<add>
<add>**New!**
<add>This endpoint now returns the labels associated with each image (`Labels`).
<add>
<add>
<ide> ## v1.16
<ide>
<ide> ### Full Documentation
<ide><path>docs/sources/reference/api/docker_remote_api_v1.17.md
<ide> Json Parameters:
<ide> - **OpenStdin** - Boolean value, opens stdin,
<ide> - **StdinOnce** - Boolean value, close stdin after the 1 attached client disconnects.
<ide> - **Env** - A list of environment variables in the form of `VAR=value`
<add>- **Labels** - A list of labels that will applied in the form of `VAR=value`
<ide> - **Cmd** - Command to run specified as a string or an array of strings.
<ide> - **Entrypoint** - Set the entrypoint for the container a a string or an array
<ide> of strings
<ide> Return low-level information on the container `id`
<ide> "ExposedPorts": null,
<ide> "Hostname": "ba033ac44011",
<ide> "Image": "ubuntu",
<add> "Labels": {
<add> "Vendor": "Acme",
<add> "License": "GPL",
<add> "Version": "1.0"
<add> },
<ide> "MacAddress": "",
<ide> "Memory": 0,
<ide> "MemorySwap": 0,
<ide> Return low-level information on the image `name`
<ide> "Cmd": ["/bin/bash"],
<ide> "Dns": null,
<ide> "Image": "ubuntu",
<del> "Labels": null,
<add> "Labels": {
<add> "Vendor": "Acme",
<add> "License": "GPL",
<add> "Version": "1.0"
<add> },
<ide> "Volumes": null,
<ide> "VolumesFrom": "",
<ide> "WorkingDir": ""
<ide><path>docs/sources/reference/commandline/cli.md
<ide> removed before the image is removed.
<ide> --link=[] Add link to another container
<ide> --lxc-conf=[] Add custom lxc options
<ide> -m, --memory="" Memory limit
<add> -l, --label=[] Set meta data on a container, for example com.example.key=value
<add> -label-file=[] Read in a line delimited file of labels
<ide> --mac-address="" Container MAC address (e.g. 92:d0:c6:0a:29:33)
<ide> --memory-swap="" Total memory (memory + swap), '-1' to disable swap
<ide> --name="" Assign a name to the container | 3 |
Ruby | Ruby | mutate the path string to avoid object allocations | b610104e5ce9c148e67cf7f73c8c3e644b2077f9 | <ide><path>actionpack/lib/action_dispatch/http/url.rb
<ide> def url_for(options)
<ide>
<ide> if options[:trailing_slash]
<ide> if path.include?('?')
<del> result << path.sub(/\?/, '/\&')
<add> path.sub!(/\?/, '/\&')
<ide> else
<del> result << path.sub(/[^\/]\z|\A\z/, '\&/')
<add> path.sub!(/[^\/]\z|\A\z/, '\&/')
<ide> end
<del> else
<del> result << path
<ide> end
<ide>
<add> result << path
<add>
<ide> if options.key? :params
<ide> params = options[:params].is_a?(Hash) ?
<ide> options[:params] : | 1 |
Python | Python | correct automatic sort | fddca794a2f9bfa9c0feb35cb542ddbed8415f46 | <ide><path>glances/core/glances_globals.py
<ide> # Instances shared between all Glances's scripts
<ide> #===============================================
<ide>
<del># The global instance for the configuration file
<del># from ..core.glances_config import Config as glancesConfig
<del># glances_config = glancesConfig()
<del># Processcount and processlist plugins
<del>from ..core.glances_processes import glancesProcesses
<add># glances_processes for processcount and processlist plugins
<add>from glances.core.glances_processes import glancesProcesses
<ide> glances_processes = glancesProcesses()
<del># Default auto process sort is 'cpu_percent'
<del>process_auto_by = 'cpu_percent'
<add>
<ide> # The global instance for the logs
<del>from ..core.glances_logs import glancesLogs
<add>from glances.core.glances_logs import glancesLogs
<ide> glances_logs = glancesLogs()
<ide><path>glances/core/glances_logs.py
<ide> from datetime import datetime
<ide>
<ide> # Import Glances libs
<del>from glances.core.glances_globals import process_auto_by
<add>from glances.core.glances_globals import glances_processes
<ide>
<ide>
<ide> class glancesLogs:
<ide> def __init__(self):
<ide> # Init the logs list
<ide> self.logs_list = []
<ide>
<del> # Automaticaly define the sort to apply on the processes list
<del> self.sort_process_by = 'none'
<del>
<del>
<ide> def get(self):
<ide> """
<ide> Return the logs list (RAW)
<ide> """
<ide> return self.logs_list
<ide>
<del>
<ide> def len(self):
<ide> """
<ide> Return the number of item in the log list
<ide> """
<ide> return self.logs_list.__len__()
<ide>
<del>
<ide> def __itemexist__(self, item_type):
<ide> """
<ide> An item exist in the list if:
<ide> def __itemexist__(self, item_type):
<ide> return i
<ide> return -1
<ide>
<del>
<del> def add(self, item_state, item_type, item_value, proc_list=[], proc_desc=""):
<add> def set_process_sort(self, item_type):
<ide> """
<del> If item is a 'new one':
<del> Add the new item at the beginning of the logs list
<del> Else:
<del> Update the existing item
<add> Define the process auto sort key from the alert type
<ide> """
<del>
<del> # Add Top process sort depending on alert type
<del> process_auto_by = 'cpu_percent'
<add> # Process sort depending on alert type
<ide> if (item_type.startswith("MEM")):
<ide> # Sort TOP process by memory_percent
<ide> process_auto_by = 'memory_percent'
<del> elif (item_type.startswith("CPU IO")):
<add> elif (item_type.startswith("CPU_IOWAIT")):
<ide> # Sort TOP process by io_counters (only for Linux OS)
<ide> process_auto_by = 'io_counters'
<del> elif (item_type.startswith("MON")):
<del> # !!! Never in v2 because MON are not logged...
<del> # Do no sort process for monitored prcesses list
<del> self.sort_process_by = 'none'
<add> else:
<add> # Default sort is...
<add> process_auto_by = 'cpu_percent'
<add>
<add> glances_processes.setsortkey(process_auto_by)
<add>
<add> return process_auto_by
<add>
<add> def reset_process_sort(self):
<add> """
<add> Reset the process_auto_by variable
<add> """
<add> # Default sort is...
<add> process_auto_by = 'cpu_percent'
<add>
<add> glances_processes.setsortkey(process_auto_by)
<add>
<add> return process_auto_by
<ide>
<add> def add(self, item_state, item_type, item_value, proc_list=[], proc_desc=""):
<add> """
<add> If item is a 'new one':
<add> Add the new item at the beginning of the logs list
<add> Else:
<add> Update the existing item
<add> """
<ide> # Add or update the log
<ide> item_index = self.__itemexist__(item_type)
<ide> if (item_index < 0):
<ide> # Item did not exist, add if WARNING or CRITICAL
<ide> if ((item_state == "WARNING") or (item_state == "CRITICAL")):
<add> # Define the automatic process sort key
<add> self.set_process_sort(item_type)
<add>
<add> # Create the new log item
<ide> # Time is stored in Epoch format
<ide> # Epoch -> DMYHMS = datetime.fromtimestamp(epoch)
<ide> item = []
<ide> def add(self, item_state, item_type, item_value, proc_list=[], proc_desc=""):
<ide> item.append(item_value) # MIN
<ide> item.append(item_value) # SUM
<ide> item.append(1) # COUNT
<del> # Process list is sorted automaticaly
<add> # Process list is sorted automaticaly
<ide> # Overwrite the user choise
<del> topprocess = sorted(proc_list, key=lambda process: process[process_auto_by],
<del> reverse=True)
<del> item.append(topprocess[0:3]) # TOP 3 PROCESS LIST
<add> # topprocess = sorted(proc_list, key=lambda process: process[process_auto_by],
<add> # reverse=True)
<add> # item.append(topprocess[0:3]) # TOP 3 PROCESS LIST
<add> item.append([]) # TOP 3 PROCESS LIST
<ide> item.append(proc_desc) # MONITORED PROCESSES DESC
<add>
<add> # Add the item to the list
<ide> self.logs_list.insert(0, item)
<ide> if self.len() > self.logs_max:
<ide> self.logs_list.pop()
<ide> else:
<ide> # Item exist, update
<ide> if ((item_state == "OK") or (item_state == "CAREFUL")):
<add> # Reset the automatic process sort key
<add> self.reset_process_sort()
<add>
<ide> # Close the item
<ide> self.logs_list[item_index][1] = time.mktime(
<ide> datetime.now().timetuple())
<del> # TOP PROCESS LIST
<del> self.logs_list[item_index][9] = []
<ide> else:
<ide> # Update the item
<ide> # State
<ide> def add(self, item_state, item_type, item_value, proc_list=[], proc_desc=""):
<ide> self.logs_list[item_index][8] += 1
<ide> self.logs_list[item_index][5] = (self.logs_list[item_index][7] /
<ide> self.logs_list[item_index][8])
<del> # Process list is sorted automaticaly
<del> # Overwrite the user choise
<del> topprocess = sorted(proc_list, key=lambda process: process[process_auto_by],
<del> reverse=True)
<ide> # TOP PROCESS LIST
<del> self.logs_list[item_index][9] = topprocess[0:3]
<add> # # Process list is sorted automaticaly
<add> # # Overwrite the user choise
<add> # topprocess = sorted(proc_list, key=lambda process: process[process_auto_by],
<add> # reverse=True)
<add> # # TOP PROCESS LIST
<add> # self.logs_list[item_index][9] = topprocess[0:3]
<add> self.logs_list[item_index][9] = []
<ide> # MONITORED PROCESSES DESC
<ide> self.logs_list[item_index][10] = proc_desc
<ide>
<ide> return self.len()
<ide>
<del>
<ide> def clean(self, critical=False):
<ide> """
<ide> Clean the log list by deleting finished item
<ide><path>glances/core/glances_processes.py
<ide> def __init__(self, cache_timeout=60):
<ide> # value = [ read_bytes_old, write_bytes_old ]
<ide> self.io_old = {}
<ide> # Init
<add> self.processsort = 'cpu_percent'
<ide> self.processlist = []
<ide> self.processcount = {'total': 0, 'running': 0, 'sleeping': 0, 'thread': 0}
<ide>
<ide> def getlist(self, sortedby=None):
<ide> """
<ide> return self.processlist
<ide>
<add> def getsortkey(self):
<add> """
<add> Return the current sort key for automatic sort
<add> """
<add> return self.processsort
<add>
<add> def setsortkey(self, sortedby):
<add> """
<add> Return the current sort key for automatic sort
<add> """
<add> self.processsort = sortedby
<add> return self.processsort
<add>
<ide> def getsortlist(self, sortedby=None):
<ide> """
<ide> Return the processlist
<ide><path>glances/plugins/glances_processcount.py
<ide>
<ide> # Import Glances libs
<ide> from glances.plugins.glances_plugin import GlancesPlugin
<del>from glances.core.glances_globals import glances_processes, process_auto_by
<add>from glances.core.glances_globals import glances_processes
<ide>
<ide>
<ide> class Plugin(GlancesPlugin):
<ide> def msg_curse(self, args=None):
<ide> if (args.process_sorted_by == 'auto'):
<ide> msg = "{0}".format(_("sorted automatically"))
<ide> ret.append(self.curse_add_line(msg))
<del> msg = " {0} {1}".format(_("by"), process_auto_by)
<add> msg = " {0} {1}".format(_("by"), glances_processes.getsortkey())
<ide> ret.append(self.curse_add_line(msg))
<ide> else:
<ide> msg = "{0} {1}".format(_("sorted by"), args.process_sorted_by)
<ide><path>glances/plugins/glances_processlist.py
<ide>
<ide> # Import Glances libs
<ide> from glances.plugins.glances_plugin import GlancesPlugin
<del>from glances.core.glances_globals import glances_processes, process_auto_by
<add>from glances.core.glances_globals import glances_processes
<ide>
<ide>
<ide> class Plugin(GlancesPlugin):
<ide> def msg_curse(self, args=None):
<ide>
<ide> # Compute the sort key
<ide> if (args.process_sorted_by == 'auto'):
<del> process_sort_key = process_auto_by
<add> process_sort_key = glances_processes.getsortkey()
<ide> else:
<ide> process_sort_key = args.process_sorted_by
<ide> sort_style = 'BOLD' | 5 |
Go | Go | fix a double rlock bug | 34837febc42859f7e8804a3417db133c963e38b3 | <ide><path>plugin/store.go
<ide> func (ps *Store) GetAllByCap(capability string) ([]plugingetter.CompatPlugin, er
<ide> * bypassing the daemon. For such tests, this check is necessary.
<ide> */
<ide> if ps != nil {
<del> ps.RLock()
<ide> result = ps.getAllByCap(capability)
<del> ps.RUnlock()
<ide> }
<ide>
<ide> // Lookup with legacy model | 1 |
Ruby | Ruby | add tests for `blank?` | 9140720dcfee74845993e7d7fd2c38855e2ce1e6 | <ide><path>activesupport/test/core_ext/date_ext_test.rb
<ide> def test_date_acts_like_date
<ide> assert Date.new.acts_like_date?
<ide> end
<ide>
<add> def test_blank?
<add> assert_not Date.new.blank?
<add> end
<add>
<ide> def test_freeze_doesnt_clobber_memoized_instance_methods
<ide> assert_nothing_raised do
<ide> Date.today.freeze.inspect
<ide><path>activesupport/test/core_ext/date_time_ext_test.rb
<ide> def test_acts_like_time
<ide> assert DateTime.new.acts_like_time?
<ide> end
<ide>
<add> def test_blank?
<add> assert_not DateTime.new.blank?
<add> end
<add>
<ide> def test_utc?
<ide> assert_equal true, DateTime.civil(2005, 2, 21, 10, 11, 12).utc?
<ide> assert_equal true, DateTime.civil(2005, 2, 21, 10, 11, 12, 0).utc?
<ide><path>activesupport/test/core_ext/time_with_zone_test.rb
<ide> def test_acts_like_date
<ide> assert_equal false, ActiveSupport::TimeWithZone.new(DateTime.civil(2000), @time_zone).acts_like?(:date)
<ide> end
<ide>
<add> def test_blank?
<add> assert_not @twz.blank?
<add> end
<add>
<ide> def test_is_a
<ide> assert_kind_of Time, @twz
<ide> assert_kind_of Time, @twz | 3 |
Go | Go | remove platform-check (was used for lcow) | 6ccda5a04111680544eadb6fbc916435ea073e69 | <ide><path>daemon/daemon.go
<ide> func (daemon *Daemon) restore() error {
<ide> log.WithError(err).Error("failed to load container")
<ide> return
<ide> }
<del> if !system.IsOSSupported(c.OS) {
<del> log.Errorf("failed to load container: %s (%q)", system.ErrNotSupportedOperatingSystem, c.OS)
<del> return
<del> }
<ide> // Ignore the container if it does not support the current driver being used by the graph
<ide> if (c.Driver == "" && daemon.graphDriver == "aufs") || c.Driver == daemon.graphDriver {
<ide> rwlayer, err := daemon.imageService.GetLayerByID(c.ID) | 1 |
Go | Go | remove unused tov1endpoint() | c8754f44d7031de4d0d0b0e1a69626e8540d2575 | <ide><path>registry/service.go
<ide> type APIEndpoint struct {
<ide> TLSConfig *tls.Config
<ide> }
<ide>
<del>// ToV1Endpoint returns a V1 API endpoint based on the APIEndpoint
<del>// Deprecated: this function is deprecated and will be removed in a future update
<del>func (e APIEndpoint) ToV1Endpoint(userAgent string, metaHeaders http.Header) *V1Endpoint {
<del> return newV1Endpoint(*e.URL, e.TLSConfig, userAgent, metaHeaders)
<del>}
<del>
<ide> // TLSConfig constructs a client TLS configuration based on server defaults
<ide> func (s *DefaultService) TLSConfig(hostname string) (*tls.Config, error) {
<ide> s.mu.Lock() | 1 |
Text | Text | improve stream documentation | 3ca79350d942e88aea64faf64555beec35e22ad2 | <ide><path>doc/api/stream.md
<ide> The Readable can switch back to paused mode using one of the following:
<ide>
<ide> * If there are no pipe destinations, by calling the
<ide> [`stream.pause()`][stream-pause] method.
<del>* If there are pipe destinations, by removing any [`'data'`][] event
<del> handlers, and removing all pipe destinations by calling the
<add>* If there are pipe destinations, by removing all pipe destinations.
<add> Multiple pipe destinations may be removed by calling the
<ide> [`stream.unpipe()`][] method.
<ide>
<ide> The important concept to remember is that a Readable will not generate data
<ide> write succeeded.
<ide>
<ide> All calls to `writable.write()` that occur between the time `writable._write()`
<ide> is called and the `callback` is called will cause the written data to be
<del>buffered. Once the `callback` is invoked, the stream will emit a [`'drain'`][]
<add>buffered. When the `callback` is invoked, the stream might emit a [`'drain'`][]
<ide> event. If a stream implementation is capable of processing multiple chunks of
<ide> data at once, the `writable._writev()` method should be implemented.
<ide> | 1 |
Python | Python | add missing symbols | 147448b65b5781ce692000e3efa13cde526aa6d6 | <ide><path>spacy/lang/ga/tokenizer_exceptions.py
<ide> # encoding: utf8
<ide> from __future__ import unicode_literals
<ide>
<del>from ...symbols import ORTH, LEMMA, NORM, POS
<add>from ...symbols import POS, DET, ADP, CCONJ, ADV, NOUN, X, AUX
<add>from ...symbols import ORTH, LEMMA, NORM
<ide>
<ide>
<ide> _exc = { | 1 |
Python | Python | fix init_model arg | 86aad11939c5e60664789c485b5bdb87a8a8ddfd | <ide><path>spacy/cli/init_model.py
<ide> @plac.annotations(
<ide> lang=("model language", "positional", None, str),
<ide> output_dir=("model output directory", "positional", None, Path),
<del> freqs_loc=("location of words frequencies file", "optional", "f", Path),
<add> freqs_loc=("location of words frequencies file", "option", "f", Path),
<ide> jsonl_loc=("location of JSONL-formatted attributes file", "option", "j", Path),
<ide> clusters_loc=("optional: location of brown clusters data",
<ide> "option", "c", str), | 1 |
Javascript | Javascript | fix change challenge model to pojo | 50b1e61e70588b2894562c6ba97fa5b355a02361 | <ide><path>gulpfile.js
<ide> gulp.task('serve', ['build-manifest'], function(cb) {
<ide> ignore: paths.serverIgnore,
<ide> exec: path.join(__dirname, 'node_modules/.bin/babel-node'),
<ide> env: {
<del> 'NODE_ENV': 'development',
<add> 'NODE_ENV': process.env.NODE_ENV || 'development',
<ide> 'DEBUG': process.env.DEBUG || 'freecc:*'
<ide> }
<ide> })
<ide><path>server/boot/challenge.js
<ide> function getRenderData$(user, challenge$, origChallengeName, solution) {
<ide> debug('looking for %s', testChallengeName);
<ide>
<ide> return challenge$
<add> .map(challenge => challenge.toJSON())
<ide> .filter((challenge) => {
<ide> return testChallengeName.test(challenge.name) &&
<ide> shouldNotFilterComingSoon(challenge);
<ide> function getRenderData$(user, challenge$, origChallengeName, solution) {
<ide> });
<ide> }
<ide>
<del> console.log(challenge.helpRoom);
<ide> // save user does nothing if user does not exist
<ide> return Observable.just({
<ide> data: {
<ide> module.exports = function(app) {
<ide> app.use(router);
<ide>
<ide> function redirectToCurrentChallenge(req, res, next) {
<del> const challengeId = req.query.id || req.cookies.currentChallengeId;
<add> let challengeId = req.query.id || req.cookies.currentChallengeId;
<add> // prevent serialized null/undefined from breaking things
<add> if (challengeId === 'undefined' || challengeId === 'null') {
<add> challengeId = null;
<add> }
<ide> getChallengeById$(challenge$, challengeId)
<ide> .doOnNext(({ dashedName })=> {
<ide> if (!dashedName) {
<ide> module.exports = function(app) {
<ide> }
<ide>
<ide> function redirectToNextChallenge(req, res, next) {
<del> const challengeId = req.query.id || req.cookies.currentChallengeId;
<add> let challengeId = req.query.id || req.cookies.currentChallengeId;
<add> if (challengeId === 'undefined' || challengeId === 'null') {
<add> challengeId = null;
<add> }
<ide>
<ide> Observable.combineLatest(
<ide> firstChallenge$,
<ide> module.exports = function(app) {
<ide> return res.redirect(redirectUrl);
<ide> }
<ide> var view = challengeView[data.challengeType];
<del> res.cookie('currentChallengeId', data.id);
<del> console.log(data.helpRoom);
<add> if (data.id) {
<add> res.cookie('currentChallengeId', data.id);
<add> }
<ide> res.render(view, data);
<ide> },
<ide> next,
<ide><path>server/middlewares/error-handlers.js
<ide> export default function prodErrorHandler() {
<ide> msg: message
<ide> });
<ide> }
<del> return res.redirect('/');
<add> return res.redirect('/map');
<ide> // json
<ide> } else if (type === 'json') {
<ide> res.setHeader('Content-Type', 'application/json'); | 3 |
Go | Go | remove error return from check graph driver func | 3011aa4e9984b0631b67f640a191677e2f3d0a8f | <ide><path>daemon/graphdriver/driver.go
<ide> func New(root string, options []string) (driver Driver, err error) {
<ide> return nil, fmt.Errorf("No supported storage backend found")
<ide> }
<ide>
<del>func checkPriorDriver(name string, root string) error {
<del>
<del> var priorDrivers []string
<del>
<add>func checkPriorDriver(name, root string) {
<add> priorDrivers := []string{}
<ide> for prior := range drivers {
<del> if _, err := os.Stat(path.Join(root, prior)); err == nil && prior != name {
<del> priorDrivers = append(priorDrivers, prior)
<add> if prior != name {
<add> if _, err := os.Stat(path.Join(root, prior)); err == nil {
<add> priorDrivers = append(priorDrivers, prior)
<add> }
<ide> }
<ide> }
<del>
<ide> if len(priorDrivers) > 0 {
<ide> log.Warnf("graphdriver %s selected. Warning: your graphdriver directory %s already contains data managed by other graphdrivers: %s", name, root, strings.Join(priorDrivers, ","))
<ide> }
<del>
<del> return nil
<ide> } | 1 |
Go | Go | remove used param on parsehost | ba973f2d74c150154390aed1a5aed8fb5d0673b8 | <ide><path>api/client/cli.go
<ide> func NewDockerCli(in io.ReadCloser, out, err io.Writer, clientFlags *cli.ClientF
<ide>
<ide> switch len(hosts) {
<ide> case 0:
<del> defaultHost := os.Getenv("DOCKER_HOST")
<del> hosts = []string{defaultHost}
<add> hosts = []string{os.Getenv("DOCKER_HOST")}
<ide> case 1:
<ide> // only accept one host to talk to
<ide> default:
<ide> return errors.New("Please specify only one -H")
<ide> }
<ide>
<del> defaultHost := opts.DefaultTCPHost
<del> if clientFlags.Common.TLSOptions != nil {
<del> defaultHost = opts.DefaultTLSHost
<del> }
<ide> var e error
<del> if hosts[0], e = opts.ParseHost(defaultHost, hosts[0]); e != nil {
<add> if hosts[0], e = opts.ParseHost(hosts[0]); e != nil {
<ide> return e
<ide> }
<ide>
<ide><path>docker/daemon.go
<ide> func (cli *DaemonCli) CmdDaemon(args ...string) error {
<ide> }
<ide> serverConfig = setPlatformServerConfig(serverConfig, cli.Config)
<ide>
<del> defaultHost := opts.DefaultHost
<ide> if commonFlags.TLSOptions != nil {
<ide> if !commonFlags.TLSOptions.InsecureSkipVerify {
<ide> // server requires and verifies client's certificate
<ide> func (cli *DaemonCli) CmdDaemon(args ...string) error {
<ide> logrus.Fatal(err)
<ide> }
<ide> serverConfig.TLSConfig = tlsConfig
<del> defaultHost = opts.DefaultTLSHost
<ide> }
<ide>
<ide> for i := 0; i < len(commonFlags.Hosts); i++ {
<ide> var err error
<del> if commonFlags.Hosts[i], err = opts.ParseHost(defaultHost, commonFlags.Hosts[i]); err != nil {
<add> if commonFlags.Hosts[i], err = opts.ParseHost(commonFlags.Hosts[i]); err != nil {
<ide> logrus.Fatalf("error parsing -H %s : %v", commonFlags.Hosts[i], err)
<ide> }
<ide> }
<ide><path>opts/opts.go
<ide> func ValidateHost(val string) (string, error) {
<ide> }
<ide>
<ide> // ParseHost and set defaults for a Daemon host string
<del>func ParseHost(defaultHTTPHost, val string) (string, error) {
<add>func ParseHost(val string) (string, error) {
<ide> host, err := parsers.ParseDockerDaemonHost(DefaultTCPHost, DefaultUnixSocket, val)
<ide> if err != nil {
<ide> return val, err
<ide><path>opts/opts_test.go
<ide> func TestParseHost(t *testing.T) {
<ide> }
<ide>
<ide> for value, errorMessage := range invalid {
<del> if _, err := ParseHost(defaultHTTPHost, value); err == nil || err.Error() != errorMessage {
<add> if _, err := ParseHost(value); err == nil || err.Error() != errorMessage {
<ide> t.Fatalf("Expected an error for %v with [%v], got [%v]", value, errorMessage, err)
<ide> }
<ide> }
<ide> for value, expected := range valid {
<del> if actual, err := ParseHost(defaultHTTPHost, value); err != nil || actual != expected {
<add> if actual, err := ParseHost(value); err != nil || actual != expected {
<ide> t.Fatalf("Expected for %v [%v], got [%v, %v]", value, expected, actual, err)
<ide> }
<ide> } | 4 |
Ruby | Ruby | remove deprecation notice from system "xcodebuild" | 479f4bc7cd731a481bde9beb9dc21a09e1494a17 | <ide><path>Library/Homebrew/formula.rb
<ide> def system cmd, *args
<ide> ohai "#{cmd} #{pretty_args*' '}".strip
<ide>
<ide> if cmd.to_s.start_with? "xcodebuild"
<del> opoo %{system "xcodebuild" is deprecated, use the xcodebuild method instead}
<ide> removed_ENV_variables.update(ENV.remove_cc_etc)
<ide> end
<ide> | 1 |
Ruby | Ruby | catch time.zone before testhelper resets it to utc | 3b953da779cfd06b6fe4cb94e7fd8b05cc10b6f6 | <ide><path>activesupport/test/current_attributes_test.rb
<ide> class Session < ActiveSupport::CurrentAttributes
<ide> # Eagerly set-up `instance`s by reference.
<ide> [ Current.instance, Session.instance ]
<ide>
<del> setup { @original_time_zone = Time.zone }
<del> teardown { Time.zone = @original_time_zone }
<add> # Use library specific minitest hook to catch Time.zone before reset is called via TestHelper
<add> def before_setup
<add> @original_time_zone = Time.zone
<add> super
<add> end
<add>
<add> # Use library specific minitest hook to set Time.zone after reset is called via TestHelper
<add> def after_teardown
<add> super
<add> Time.zone = @original_time_zone
<add> end
<ide>
<ide> setup { assert_nil Session.previous, "Expected Session to not have leaked state" }
<ide> | 1 |
Python | Python | implement cors for the restful api | 72017271f7445224944d00c1cd59f00072962575 | <ide><path>glances/outputs/glances_bottle.py
<ide>
<ide> # Import mandatory Bottle lib
<ide> try:
<del> from bottle import Bottle, template, static_file, TEMPLATE_PATH, abort, response
<add> from bottle import Bottle, template, static_file, TEMPLATE_PATH, abort, response, request
<ide> except ImportError:
<ide> logger.critical('Bottle module not found. Glances cannot start in web server mode.')
<ide> sys.exit(2)
<ide> def __init__(self, args=None):
<ide>
<ide> # Init Bottle
<ide> self._app = Bottle()
<add> # Enable CORS (issue #479)
<add> self._app.install(EnableCors())
<add> # Define routes
<ide> self._route()
<ide>
<ide> # Update the template path (glances/outputs/bottle)
<ide> def _favicon(self):
<ide> # Return the static file
<ide> return static_file('favicon.ico', root=self.STATIC_PATH)
<ide>
<add> def enable_cors(self):
<add> """Enable CORS"""
<add> response.headers['Access-Control-Allow-Origin'] = '*'
<add> response.headers['Access-Control-Allow-Methods'] = 'PUT, GET, POST, DELETE, OPTIONS'
<add> response.headers['Access-Control-Allow-Headers'] = 'Origin, Accept, Content-Type, X-Requested-With, X-CSRF-Token'
<add>
<ide> def _api_plugins(self):
<ide> """
<ide> Glances API RESTFul implementation
<ide> def display(self, stats, refresh_time=None):
<ide> }
<ide>
<ide> return template('base', refresh_time=refresh_time, stats=stats)
<add>
<add>
<add>class EnableCors(object):
<add> name = 'enable_cors'
<add> api = 2
<add>
<add> def apply(self, fn, context):
<add> def _enable_cors(*args, **kwargs):
<add> # set CORS headers
<add> response.headers['Access-Control-Allow-Origin'] = '*'
<add> response.headers['Access-Control-Allow-Methods'] = 'GET, POST, PUT, OPTIONS'
<add> response.headers['Access-Control-Allow-Headers'] = 'Origin, Accept, Content-Type, X-Requested-With, X-CSRF-Token'
<add>
<add> if request.method != 'OPTIONS':
<add> # actual request; reply with the actual response
<add> return fn(*args, **kwargs)
<add>
<add> return _enable_cors | 1 |
PHP | PHP | use getparsedbody() instead of getdata() | 8d54a89e5f3b762bd20c34560b16236ea4cb79aa | <ide><path>src/Http/Middleware/CsrfProtectionMiddleware.php
<ide>
<ide> use Cake\Http\Cookie\Cookie;
<ide> use Cake\Http\Exception\InvalidCsrfTokenException;
<del>use Cake\Http\ServerRequest;
<add>use Cake\Http\Response;
<ide> use Cake\I18n\Time;
<ide> use Cake\Utility\Hash;
<ide> use Cake\Utility\Security;
<ide> public function __construct(array $config = [])
<ide> /**
<ide> * Checks and sets the CSRF token depending on the HTTP verb.
<ide> *
<del> * @param \Cake\Http\ServerRequest $request The request.
<add> * @param \Psr\Http\Message\ServerRequestInterface $request The request.
<ide> * @param \Psr\Http\Server\RequestHandlerInterface $handler The request handler.
<ide> * @return \Psr\Http\Message\ResponseInterface A response.
<ide> */
<ide> public function process(ServerRequestInterface $request, RequestHandlerInterface
<ide> /**
<ide> * Checks if the request is POST, PUT, DELETE or PATCH and validates the CSRF token
<ide> *
<del> * @param \Cake\Http\ServerRequest $request The request object.
<del> * @return \Cake\Http\ServerRequest
<add> * @param \Psr\Http\Message\ServerRequestInterface $request The request object.
<add> * @return \Psr\Http\Message\ServerRequestInterface
<ide> */
<del> protected function _validateAndUnsetTokenField(ServerRequest $request): ServerRequest
<add> protected function _validateAndUnsetTokenField(ServerRequestInterface $request): ServerRequestInterface
<ide> {
<del> if (in_array($request->getMethod(), ['PUT', 'POST', 'DELETE', 'PATCH'], true) || $request->getData()) {
<add> if (in_array($request->getMethod(), ['PUT', 'POST', 'DELETE', 'PATCH'], true)
<add> || $request->getParsedBody()
<add> ) {
<ide> $this->_validateToken($request);
<ide> $body = $request->getParsedBody();
<ide> if (is_array($body)) {
<ide> protected function _addTokenToRequest(string $token, ServerRequestInterface $req
<ide> * @param \Cake\Http\Response $response The response.
<ide> * @return \Cake\Http\Response $response Modified response.
<ide> */
<del> protected function _addTokenCookie(string $token, ServerRequestInterface $request, ResponseInterface $response): ResponseInterface
<add> protected function _addTokenCookie(string $token, ServerRequestInterface $request, Response $response): Response
<ide> {
<ide> $expiry = new Time($this->_config['expiry']);
<ide>
<ide> protected function _addTokenCookie(string $token, ServerRequestInterface $reques
<ide> /**
<ide> * Validate the request data against the cookie token.
<ide> *
<del> * @param \Cake\Http\ServerRequest $request The request to validate against.
<add> * @param \Psr\Http\Message\ServerRequestInterface $request The request to validate against.
<ide> * @return void
<ide> * @throws \Cake\Http\Exception\InvalidCsrfTokenException When the CSRF token is invalid or missing.
<ide> */
<del> protected function _validateToken(ServerRequest $request): void
<add> protected function _validateToken(ServerRequestInterface $request): void
<ide> {
<ide> $cookies = $request->getCookieParams();
<ide> $cookie = Hash::get($cookies, $this->_config['cookieName']); | 1 |
Ruby | Ruby | allow --use-clang to override fails_with_llvm | d1aac6bc0328ea9ab3cc8bf379d7f1cddad3b9f3 | <ide><path>Library/Homebrew/formula.rb
<ide> def std_cmake_parameters
<ide> end
<ide>
<ide> def handle_llvm_failure llvm
<del> unless (ENV['HOMEBREW_USE_LLVM'] or ARGV.include? '--use-llvm')
<add> unless (ENV['HOMEBREW_USE_LLVM'] or ARGV.include? '--use-llvm' or ARGV.include? '--use-clang')
<ide> ENV.gcc_4_2 if default_cc =~ /llvm/
<ide> return
<ide> end | 1 |
Javascript | Javascript | change concatenated string to template | ef3776a78524d4d7986033838abfcdd0fba0fdc9 | <ide><path>lib/_http_common.js
<ide> function parserOnHeadersComplete(versionMajor, versionMinor, headers, method,
<ide> parser.incoming = new IncomingMessage(parser.socket);
<ide> parser.incoming.httpVersionMajor = versionMajor;
<ide> parser.incoming.httpVersionMinor = versionMinor;
<del> parser.incoming.httpVersion = versionMajor + '.' + versionMinor;
<add> parser.incoming.httpVersion = `${versionMajor}.${versionMinor}`;
<ide> parser.incoming.url = url;
<ide>
<ide> var n = headers.length; | 1 |
Javascript | Javascript | remove extra whitespace | ffdde26981739f28eb50f55bce4c85d71f72956c | <ide><path>src/Angular.js
<ide> function angularInit(element, bootstrap) {
<ide> *
<ide> * Angular will detect if it has been loaded into the browser more than once and only allow the
<ide> * first loaded script to be bootstrapped and will report a warning to the browser console for
<del> * each of the subsequent scripts. This prevents strange results in applications, where otherwise
<add> * each of the subsequent scripts. This prevents strange results in applications, where otherwise
<ide> * multiple instances of Angular try to work on the DOM.
<ide> *
<ide> * ```html | 1 |
PHP | PHP | remove duplicate id check | 66fdf12f0d9f5558f9b4274f36aa2872fd972a5a | <ide><path>src/Illuminate/Session/Store.php
<ide> public function __construct($name, SessionHandlerInterface $handler, $id = null)
<ide> $this->name = $name;
<ide> $this->handler = $handler;
<ide> $this->metaBag = new MetadataBag;
<del> $this->setId($id ?: $this->generateSessionId());
<add> $this->setId($id);
<ide> }
<ide>
<ide> /** | 1 |
Javascript | Javascript | add mdn link for iterable | 29f758731f6f70d119d6149f128f7e8fe7bb8279 | <ide><path>tools/doc/type-parser.js
<ide> const typeMap = {
<ide> 'http.IncomingMessage': 'http.html#http_class_http_incomingmessage',
<ide> 'http.Server': 'http.html#http_class_http_server',
<ide> 'http.ServerResponse': 'http.html#http_class_http_serverresponse',
<add> 'Iterable': jsDocPrefix +
<add> 'Reference/Iteration_protocols#The_iterable_protocol',
<ide> 'Iterator': jsDocPrefix +
<ide> 'Reference/Iteration_protocols#The_iterator_protocol'
<ide> }; | 1 |
Ruby | Ruby | cache the digest path on the stack | bf9b7325f8814a4d90267e59697e5125f40a69c3 | <ide><path>actionview/lib/action_view/renderer/partial_renderer/collection_caching.rb
<ide> def callable_cache_key?
<ide> def collection_by_cache_keys(view, template)
<ide> seed = callable_cache_key? ? @options[:cached] : ->(i) { i }
<ide>
<add> digest_path = view.digest_path_from_virtual(template.virtual_path)
<add>
<ide> @collection.each_with_object({}) do |item, hash|
<del> hash[expanded_cache_key(seed.call(item), view, template)] = item
<add> hash[expanded_cache_key(seed.call(item), view, template, digest_path)] = item
<ide> end
<ide> end
<ide>
<del> def expanded_cache_key(key, view, template)
<del> key = view.combined_fragment_cache_key(view.cache_fragment_name(key, virtual_path: template.virtual_path, digest_path: digest_path(view, template)))
<add> def expanded_cache_key(key, view, template, digest_path)
<add> key = view.combined_fragment_cache_key(view.cache_fragment_name(key, virtual_path: template.virtual_path, digest_path: digest_path))
<ide> key.frozen? ? key.dup : key # #read_multi & #write may require mutability, Dalli 2.6.0.
<ide> end
<ide>
<del> def digest_path(view, template)
<del> @digest_path ||= view.digest_path_from_virtual(template.virtual_path)
<del> end
<del>
<ide> # `order_by` is an enumerable object containing keys of the cache,
<ide> # all keys are passed in whether found already or not.
<ide> # | 1 |
Javascript | Javascript | remove unused/invalid flow suppression | 1142d9d059936ce0814c40f05693b7c6ee6b4e5c | <ide><path>Libraries/Animated/src/Interpolation.js
<ide> function colorToRgba(input: string): string {
<ide> return input;
<ide> }
<ide>
<del> int32Color = int32Color || 0; // $FlowIssue
<add> int32Color = int32Color || 0;
<ide>
<ide> var r = (int32Color & 0xff000000) >>> 24;
<ide> var g = (int32Color & 0x00ff0000) >>> 16; | 1 |
Go | Go | change error log to warning when close stdin fail | 37db0220452a837791af84d044b0b308260dccec | <ide><path>container/stream/streams.go
<ide> func (c *Config) CopyToPipe(iop libcontainerd.IOPipe) {
<ide> go func() {
<ide> pools.Copy(iop.Stdin, stdin)
<ide> if err := iop.Stdin.Close(); err != nil {
<del> logrus.Errorf("failed to close stdin: %+v", err)
<add> logrus.Warnf("failed to close stdin: %+v", err)
<ide> }
<ide> }()
<ide> } | 1 |
Text | Text | fix curl invocation | 26187bd851141236a909c0bada5a2743fc237e0e | <ide><path>docs/sources/articles/https.md
<ide> location using the environment variable `DOCKER_CERT_PATH`.
<ide> To use `curl` to make test API requests, you need to use three extra command line
<ide> flags:
<ide>
<del> $ curl --insecure --cert ~/.docker/cert.pem --key ~/.docker/key.pem https://boot2docker:2376/images/json`
<add> $ curl https://boot2docker:2376/images/json \
<add> --cert ~/.docker/cert.pem \
<add> --key ~/.docker/key.pem \
<add> --cacert ~/.docker/ca.pem | 1 |
PHP | PHP | revert previous changes | e016f1156ff18b1324c3f8f4108aa6c0b8d3ae8b | <ide><path>lib/Cake/Routing/Router.php
<ide> public static function reverse($params, $full = false) {
<ide> * @param array|string $url URL to normalize Either an array or a string URL.
<ide> * @return string Normalized URL
<ide> */
<del> public static function normalize($url = '/', $strip = true) {
<add> public static function normalize($url = '/') {
<ide> if (is_array($url)) {
<ide> $url = Router::url($url);
<ide> }
<ide> public static function normalize($url = '/', $strip = true) {
<ide> }
<ide> $request = Router::getRequest();
<ide>
<del> if ($strip && !empty($request->base) && stristr($url, $request->base)) {
<add> if (!empty($request->base) && stristr($url, $request->base)) {
<ide> $url = preg_replace('/^' . preg_quote($request->base, '/') . '/', '', $url, 1);
<ide> }
<ide> $url = '/' . $url;
<ide><path>lib/Cake/Test/Case/Routing/RouterTest.php
<ide> public function testUrlNormalization() {
<ide> $result = Router::normalize('/us/users/logout/');
<ide> $this->assertEquals('/users/logout', $result);
<ide>
<del> $result = Router::normalize('/us/users/logout/', false);
<del> $this->assertEquals('/us/users/logout', $result);
<del>
<ide> Router::reload();
<ide>
<ide> $request = new CakeRequest(); | 2 |
Python | Python | add example deployment that setups an ssh key | b7f35d04c131fdeadbaba685f3ec3ba6fb0b3860 | <ide><path>libcloud/deployment.py
<add># Licensed to the Apache Software Foundation (ASF) under one or more
<add># contributor license agreements. See the NOTICE file distributed with
<add># this work for additional information regarding copyright ownership.
<add># libcloud.org licenses this file to You under the Apache License, Version 2.0
<add># (the "License"); you may not use this file except in compliance with
<add># the License. You may obtain a copy of the License at
<add>#
<add># http://www.apache.org/licenses/LICENSE-2.0
<add>#
<add># Unless required by applicable law or agreed to in writing, software
<add># distributed under the License is distributed on an "AS IS" BASIS,
<add># WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
<add># See the License for the specific language governing permissions and
<add># limitations under the License.
<add>
<add>"""
<add>Provides generic deployment steps for machines post boot.
<add>"""
<add>
<add>class Deployment(object):
<add> pass
<add>
<add>class SSHDeployment(Deployment):
<add> def __init__(self, key):
<add> self.key = key
<add>
<add> def run(self, node, client):
<add> sftp = client.open_sftp()
<add> sftp.mkdir(".ssh")
<add> sftp.chdir(".ssh")
<add> ak = sftp.file("authorized_keys", mode='w')
<add> ak.write(self.key)
<add> ak.close()
<add> sftp.close()
<add> return node
<ide>\ No newline at end of file | 1 |
Ruby | Ruby | use new version comparison machinery | 93baea7e47627cdd309715143b7d125966a28df5 | <ide><path>Library/Homebrew/cmd/outdated.rb
<ide> require 'formula'
<add>require 'keg'
<ide>
<ide> module Homebrew extend self
<ide> def outdated
<del> outdated_brews.each do |f|
<add> outdated_brews do |f|
<ide> if $stdout.tty? and not ARGV.flag? '--quiet'
<del> versions = f.rack.cd{ Dir['*'] }.join(', ')
<del> puts "#{f.name} (#{versions} < #{f.version})"
<add> versions = f.rack.subdirs.map { |d| Keg.new(d) }.map { |keg| keg.basename }
<add> puts "#{f.name} (#{versions*', '} < #{f.version})"
<ide> else
<ide> puts f.name
<ide> end
<ide> end
<ide> end
<ide>
<ide> def outdated_brews
<del> HOMEBREW_CELLAR.subdirs.map do |rack|
<del> # Skip kegs with no versions installed
<del> next unless rack.subdirs
<add> HOMEBREW_CELLAR.subdirs.reject { |d| d.subdirs.empty? }.map do |rack|
<add> f = Formula.factory(rack.basename.to_s) rescue nil
<add> next if f.nil?
<ide>
<del> # Skip HEAD formulae, consider them "evergreen"
<del> next if rack.subdirs.map{ |keg| keg.basename.to_s }.include? "HEAD"
<del>
<del> name = rack.basename.to_s
<del> f = Formula.factory name rescue nil
<del> f if f and not f.installed?
<add> kegs = rack.subdirs.map { |d| Keg.new(d) }
<add> unless kegs.any? { |keg| keg.version >= f.version }
<add> yield f if block_given?
<add> f
<add> end
<ide> end.compact
<ide> end
<ide> end
<ide><path>Library/Homebrew/cmd/upgrade.rb
<del>require 'cmd/outdated'
<ide> require 'cmd/install'
<ide>
<ide> class Fixnum
<ide> def upgrade
<ide> Homebrew.perform_preinstall_checks
<ide>
<ide> outdated = if ARGV.named.empty?
<add> require 'cmd/outdated'
<ide> Homebrew.outdated_brews
<ide> else
<ide> ARGV.formulae.select do |f| | 2 |
Javascript | Javascript | use default assertion messages | 0736ad47433104666aa4e4c1781be743db43f653 | <ide><path>test/addons-napi/test_buffer/test.js
<ide> const common = require('../../common');
<ide> const binding = require(`./build/${common.buildType}/test_buffer`);
<ide> const assert = require('assert');
<ide>
<del>assert.strictEqual(binding.newBuffer().toString(), binding.theText,
<del> 'buffer returned by newBuffer() has wrong contents');
<del>assert.strictEqual(binding.newExternalBuffer().toString(), binding.theText,
<del> 'buffer returned by newExternalBuffer() has wrong contents');
<add>assert.strictEqual(binding.newBuffer().toString(), binding.theText);
<add>assert.strictEqual(binding.newExternalBuffer().toString(), binding.theText);
<ide> console.log('gc1');
<ide> global.gc();
<ide> assert.strictEqual(binding.getDeleterCallCount(), 1, 'deleter was not called');
<del>assert.strictEqual(binding.copyBuffer().toString(), binding.theText,
<del> 'buffer returned by copyBuffer() has wrong contents');
<add>assert.strictEqual(binding.copyBuffer().toString(), binding.theText);
<ide>
<ide> let buffer = binding.staticBuffer();
<ide> assert.strictEqual(binding.bufferHasInstance(buffer), true,
<ide> 'buffer type checking fails');
<del>assert.strictEqual(binding.bufferInfo(buffer), true, 'buffer data is accurate');
<add>assert.strictEqual(binding.bufferInfo(buffer), true);
<ide> buffer = null;
<ide> global.gc();
<ide> console.log('gc2'); | 1 |
Java | Java | move interruptibleblockingqueue inside observeon | 5d1006ea07432e34f1ed4b2d75c0024fa8cdae78 | <ide><path>rxjava-core/src/main/java/rx/operators/OperatorObserveOn.java
<ide> */
<ide> package rx.operators;
<ide>
<add>import java.util.concurrent.Semaphore;
<ide> import java.util.concurrent.atomic.AtomicLong;
<ide>
<ide> import rx.Scheduler;
<ide> import rx.schedulers.TestScheduler;
<ide> import rx.schedulers.TrampolineScheduler;
<ide> import rx.subscriptions.Subscriptions;
<del>import rx.util.InterruptibleBlockingQueue;
<ide> import rx.util.functions.Action0;
<ide> import rx.util.functions.Action1;
<ide>
<ide> private class ObserveOnSubscriber extends Subscriber<T> {
<ide> final Subscriber<? super T> observer;
<ide> private volatile Scheduler.Inner recursiveScheduler;
<ide>
<del> private final InterruptibleBlockingQueue queue = new InterruptibleBlockingQueue(bufferSize);
<add> private final InterruptibleBlockingQueue<Object> queue = new InterruptibleBlockingQueue<Object>(bufferSize);
<ide> final AtomicLong counter = new AtomicLong(0);
<ide>
<ide> public ObserveOnSubscriber(Subscriber<? super T> observer) {
<ide> private void pollQueue() {
<ide>
<ide> }
<ide>
<add> /**
<add> * Single-producer-single-consumer queue (only thread-safe for 1 producer thread with 1 consumer thread).
<add> *
<add> * This supports an interrupt() being called externally rather than needing to interrupt the thread. This allows
<add> * unsubscribe behavior when this queue is being used.
<add> *
<add> * @param <E>
<add> */
<add> private static class InterruptibleBlockingQueue<E> {
<add>
<add> private final Semaphore semaphore;
<add> private volatile boolean interrupted = false;
<add>
<add> private final E[] buffer;
<add>
<add> private AtomicLong tail = new AtomicLong();
<add> private AtomicLong head = new AtomicLong();
<add> private final int capacity;
<add> private final int mask;
<add>
<add> @SuppressWarnings("unchecked")
<add> public InterruptibleBlockingQueue(final int size) {
<add> this.semaphore = new Semaphore(size);
<add> this.capacity = size;
<add> this.mask = size - 1;
<add> buffer = (E[]) new Object[size];
<add> }
<add>
<add> /**
<add> * Used to unsubscribe and interrupt the producer if blocked in put()
<add> */
<add> public void interrupt() {
<add> interrupted = true;
<add> semaphore.release();
<add> }
<add>
<add> public void addBlocking(final E e) throws InterruptedException {
<add> if (interrupted) {
<add> throw new InterruptedException("Interrupted by Unsubscribe");
<add> }
<add> semaphore.acquire();
<add> if (interrupted) {
<add> throw new InterruptedException("Interrupted by Unsubscribe");
<add> }
<add> if (e == null) {
<add> throw new IllegalArgumentException("Can not put null");
<add> }
<add>
<add> if (offer(e)) {
<add> return;
<add> } else {
<add> throw new IllegalStateException("Queue is full");
<add> }
<add> }
<add>
<add> private boolean offer(final E e) {
<add> final long _t = tail.get();
<add> if (_t - head.get() == capacity) {
<add> // queue is full
<add> return false;
<add> }
<add> int index = (int) (_t & mask);
<add> buffer[index] = e;
<add> // move the tail forward
<add> tail.lazySet(_t + 1);
<add>
<add> return true;
<add> }
<add>
<add> public E poll() {
<add> if (interrupted) {
<add> return null;
<add> }
<add> final long _h = head.get();
<add> if (tail.get() == _h) {
<add> // nothing available
<add> return null;
<add> }
<add> int index = (int) (_h & mask);
<add>
<add> // fetch the item
<add> E v = buffer[index];
<add> // allow GC to happen
<add> buffer[index] = null;
<add> // increment and signal we're done
<add> head.lazySet(_h + 1);
<add> if (v != null) {
<add> semaphore.release();
<add> }
<add> return v;
<add> }
<add>
<add> public int size()
<add> {
<add> int size;
<add> do
<add> {
<add> final long currentHead = head.get();
<add> final long currentTail = tail.get();
<add> size = (int) (currentTail - currentHead);
<add> } while (size > buffer.length);
<add>
<add> return size;
<add> }
<add>
<add> }
<ide> }
<ide>\ No newline at end of file
<ide><path>rxjava-core/src/main/java/rx/util/InterruptibleBlockingQueue.java
<del>package rx.util;
<del>
<del>import java.util.concurrent.Semaphore;
<del>import java.util.concurrent.atomic.AtomicLong;
<del>
<del>/**
<del> * Single-producer-single-consumer queue (only thread-safe for 1 producer thread with 1 consumer thread).
<del> *
<del> * This supports an interrupt() being called externally rather than needing to interrupt the thread. This allows
<del> * unsubscribe behavior when this queue is being used.
<del> *
<del> * @param <E>
<del> */
<del>public class InterruptibleBlockingQueue<E> {
<del>
<del> private final Semaphore semaphore;
<del> private volatile boolean interrupted = false;
<del>
<del> private final E[] buffer;
<del>
<del> private AtomicLong tail = new AtomicLong();
<del> private AtomicLong head = new AtomicLong();
<del> private final int capacity;
<del> private final int mask;
<del>
<del> @SuppressWarnings("unchecked")
<del> public InterruptibleBlockingQueue(final int size) {
<del> this.semaphore = new Semaphore(size);
<del> this.capacity = size;
<del> this.mask = size - 1;
<del> buffer = (E[]) new Object[size];
<del> }
<del>
<del> /**
<del> * Used to unsubscribe and interrupt the producer if blocked in put()
<del> */
<del> public void interrupt() {
<del> interrupted = true;
<del> semaphore.release();
<del> }
<del>
<del> public void addBlocking(final E e) throws InterruptedException {
<del> if (interrupted) {
<del> throw new InterruptedException("Interrupted by Unsubscribe");
<del> }
<del> semaphore.acquire();
<del> if (interrupted) {
<del> throw new InterruptedException("Interrupted by Unsubscribe");
<del> }
<del> if (e == null) {
<del> throw new IllegalArgumentException("Can not put null");
<del> }
<del>
<del> if (offer(e)) {
<del> return;
<del> } else {
<del> throw new IllegalStateException("Queue is full");
<del> }
<del> }
<del>
<del> private boolean offer(final E e) {
<del> final long _t = tail.get();
<del> if (_t - head.get() == capacity) {
<del> // queue is full
<del> return false;
<del> }
<del> int index = (int) (_t & mask);
<del> buffer[index] = e;
<del> // move the tail forward
<del> tail.lazySet(_t + 1);
<del>
<del> return true;
<del> }
<del>
<del> public E poll() {
<del> if (interrupted) {
<del> return null;
<del> }
<del> final long _h = head.get();
<del> if (tail.get() == _h) {
<del> // nothing available
<del> return null;
<del> }
<del> int index = (int) (_h & mask);
<del>
<del> // fetch the item
<del> E v = buffer[index];
<del> // allow GC to happen
<del> buffer[index] = null;
<del> // increment and signal we're done
<del> head.lazySet(_h + 1);
<del> if (v != null) {
<del> semaphore.release();
<del> }
<del> return v;
<del> }
<del>
<del> public int size()
<del> {
<del> int size;
<del> do
<del> {
<del> final long currentHead = head.get();
<del> final long currentTail = tail.get();
<del> size = (int) (currentTail - currentHead);
<del> } while (size > buffer.length);
<del>
<del> return size;
<del> }
<del>
<del>} | 2 |
Javascript | Javascript | remove new statement before buffer.from | 04555e36d39238b3728b349e2db911b36e60a9df | <ide><path>lib/LibManifestPlugin.js
<ide> class LibManifestPlugin {
<ide> return obj;
<ide> }, Object.create(null))
<ide> };
<del> const content = new Buffer.from(JSON.stringify(manifest), "utf8");
<add> const content = Buffer.from(JSON.stringify(manifest), "utf8");
<ide> compiler.outputFileSystem.mkdirp(path.dirname(targetPath), err => {
<ide> if(err) return callback(err);
<ide> compiler.outputFileSystem.writeFile(targetPath, content, callback); | 1 |
Java | Java | fix typo in @scheduled | 9a7fb7022dd5b7d2d8d5514ab2fb827b3e32854a | <ide><path>spring-context/src/main/java/org/springframework/scheduling/annotation/Scheduled.java
<ide> * The {@link TimeUnit} to use for {@link #fixedDelay}, {@link #fixedDelayString},
<ide> * {@link #fixedRate}, {@link #fixedRateString}, {@link #initialDelay}, and
<ide> * {@link #initialDelayString}.
<del> * <p>Defaults to {@link TimeUnit#MICROSECONDS}.
<add> * <p>Defaults to {@link TimeUnit#MILLISECONDS}.
<ide> * <p>This attribute is ignored for {@linkplain #cron() cron expressions}
<ide> * and for {@link java.time.Duration} values supplied via {@link #fixedDelayString},
<ide> * {@link #fixedRateString}, or {@link #initialDelayString}. | 1 |
Ruby | Ruby | remove the universal accessor from buildoptions | e6498f4dfc37524be8d0827957ed75fc471ad345 | <ide><path>Library/Homebrew/build_options.rb
<ide> class BuildOptions
<del> attr_accessor :universal
<del>
<ide> def initialize(args, options)
<ide> @args = args
<ide> @options = options
<ide> def stable?
<ide>
<ide> # True if the user requested a universal build.
<ide> def universal?
<del> universal || include?("universal") && option_defined?("universal")
<add> include?("universal") && option_defined?("universal")
<ide> end
<ide>
<ide> # True if the user requested to enable C++11 mode.
<ide><path>Library/Homebrew/compat/formula.rb
<ide> def self.class_s(name)
<ide> def self.factory(name)
<ide> Formulary.factory(name)
<ide> end
<add>
<add> def self.require_universal_deps
<add> define_method(:require_universal_deps?) { true }
<add> end
<ide> end
<ide><path>Library/Homebrew/formula.rb
<ide> def skip_cxxstdlib_check?
<ide> self.class.cxxstdlib.include?(:skip)
<ide> end
<ide>
<add> def require_universal_deps?
<add> false
<add> end
<add>
<ide> # yields self with current working directory set to the uncompressed tarball
<ide> def brew
<ide> validate_attributes :name, :version
<ide> def needs *standards
<ide> end
<ide> end
<ide>
<del> def require_universal_deps
<del> specs.each { |spec| spec.build.universal = true }
<del> end
<del>
<ide> def test &block
<ide> return @test unless block_given?
<ide> @test_defined = true
<ide><path>Library/Homebrew/formula_installer.rb
<ide> def effective_build_options_for(dependent, inherited_options=[])
<ide> def inherited_options_for(dep)
<ide> inherited_options = Options.new
<ide> u = Option.new("universal")
<del> if (options.include?(u) || f.build.universal?) && !dep.build? && dep.to_formula.option_defined?(u)
<add> if (options.include?(u) || f.require_universal_deps?) && !dep.build? && dep.to_formula.option_defined?(u)
<ide> inherited_options << u
<ide> end
<ide> inherited_options | 4 |
Go | Go | fix unmarshal error in endpoint interface | bb6449bedbff8336291b2c24a281d9b042c44ca2 | <ide><path>libnetwork/endpoint.go
<ide> func (ep *endpoint) UnmarshalJSON(b []byte) (err error) {
<ide> ep.id = epMap["id"].(string)
<ide>
<ide> ib, _ := json.Marshal(epMap["ep_iface"])
<del> json.Unmarshal(ib, ep.iface)
<add> json.Unmarshal(ib, &ep.iface)
<ide>
<ide> tb, _ := json.Marshal(epMap["exposed_ports"])
<ide> var tPorts []types.TransportPort | 1 |
Python | Python | push cls 283653640, 284011539 | 22e20f84f8a06d5c489c1c837f81d4fd56ccb187 | <ide><path>research/lstm_object_detection/lstm/rnn_decoder.py
<ide> """Custom RNN decoder."""
<ide>
<ide> import tensorflow as tf
<add>import lstm_object_detection.lstm.utils as lstm_utils
<add>
<add>
<add>class _NoVariableScope(object):
<add>
<add> def __enter__(self):
<add> return
<add>
<add> def __exit__(self, exc_type, exc_value, traceback):
<add> return False
<ide>
<ide>
<ide> def rnn_decoder(decoder_inputs,
<ide> def rnn_decoder(decoder_inputs,
<ide> * prev is a 2D Tensor of shape [batch_size x output_size],
<ide> * i is an integer, the step number (when advanced control is needed),
<ide> * next is a 2D Tensor of shape [batch_size x input_size].
<del> scope: VariableScope for the created subgraph; defaults to "rnn_decoder".
<add> scope: optional VariableScope for the created subgraph.
<ide> Returns:
<ide> A tuple of the form (outputs, state), where:
<ide> outputs: A list of the same length as decoder_inputs of 4D Tensors with
<ide> def rnn_decoder(decoder_inputs,
<ide> cell at each time-step. It is a 2D Tensor of shape
<ide> [batch_size x cell.state_size].
<ide> """
<del> with tf.variable_scope(scope or 'rnn_decoder'):
<add> with tf.variable_scope(scope) if scope else _NoVariableScope():
<ide> state_tuple = initial_state
<ide> outputs = []
<ide> states = []
<ide> def multi_input_rnn_decoder(decoder_inputs,
<ide> Useful when input sequences have differing numbers of channels. Final
<ide> bottlenecks will have the same dimension.
<ide> flatten_state: Whether the LSTM state is flattened.
<del> scope: VariableScope for the created subgraph; defaults to "rnn_decoder".
<add> scope: optional VariableScope for the created subgraph.
<ide> Returns:
<ide> A tuple of the form (outputs, state), where:
<ide> outputs: A list of the same length as decoder_inputs of 2D Tensors with
<ide> def multi_input_rnn_decoder(decoder_inputs,
<ide> """
<ide> if flatten_state and len(decoder_inputs[0]) > 1:
<ide> raise ValueError('In export mode, unroll length should not be more than 1')
<del> with tf.variable_scope(scope or 'rnn_decoder'):
<add> with tf.variable_scope(scope) if scope else _NoVariableScope():
<ide> state_tuple = initial_state
<ide> outputs = []
<ide> states = []
<ide> def multi_input_rnn_decoder(decoder_inputs,
<ide>
<ide> action = generate_action(selection_strategy, local_step, sequence_step,
<ide> [batch_size, 1, 1, 1])
<del> inputs, _ = select_inputs(decoder_inputs, action, local_step)
<add> inputs, _ = (
<add> select_inputs(decoder_inputs, action, local_step, is_training,
<add> is_quantized))
<ide> # Mark base network endpoints under raw_inputs/
<ide> with tf.name_scope(None):
<ide> inputs = tf.identity(inputs, 'raw_inputs/base_endpoint')
<ide> def generate_action(selection_strategy, local_step, sequence_step,
<ide> return tf.cast(action, tf.int32)
<ide>
<ide>
<del>def select_inputs(decoder_inputs, action, local_step, get_alt_inputs=False):
<add>def select_inputs(decoder_inputs, action, local_step, is_training, is_quantized,
<add> get_alt_inputs=False):
<ide> """Selects sequence from decoder_inputs based on 1D actions.
<ide>
<ide> Given multiple input batches, creates a single output batch by
<ide> def select_inputs(decoder_inputs, action, local_step, get_alt_inputs=False):
<ide> decoder_inputs: A 2-D list of tensor inputs.
<ide> action: A tensor of shape [batch_size]. Each element corresponds to an index
<ide> of decoder_inputs to choose.
<del> step: The current timestep.
<add> local_step: The current timestep.
<add> is_training: boolean, whether the network is training. When using learned
<add> selection, attempts exploration if training.
<add> is_quantized: flag to enable/disable quantization mode.
<ide> get_alt_inputs: Whether the non-chosen inputs should also be returned.
<ide>
<ide> Returns:
<ide> def select_inputs(decoder_inputs, action, local_step, get_alt_inputs=False):
<ide> [decoder_inputs[seq_index][local_step] for seq_index in range(num_seqs)],
<ide> axis=-1)
<ide> action_index = tf.one_hot(action, num_seqs)
<del> inputs = tf.reduce_sum(stacked_inputs * action_index, axis=-1)
<add> selected_inputs = (
<add> lstm_utils.quantize_op(stacked_inputs * action_index, is_training,
<add> is_quantized, scope='quant_selected_inputs'))
<add> inputs = tf.reduce_sum(selected_inputs, axis=-1)
<ide> inputs_alt = None
<ide> # Only works for 2 models.
<ide> if get_alt_inputs:
<ide> # Reverse of action_index.
<ide> action_index_alt = tf.one_hot(action, num_seqs, on_value=0.0, off_value=1.0)
<del> inputs_alt = tf.reduce_sum(stacked_inputs * action_index_alt, axis=-1)
<add> selected_inputs = (
<add> lstm_utils.quantize_op(stacked_inputs * action_index_alt, is_training,
<add> is_quantized, scope='quant_selected_inputs_alt'))
<add> inputs_alt = tf.reduce_sum(selected_inputs, axis=-1)
<ide> return inputs, inputs_alt
<ide>
<ide> def select_state(previous_state, new_state, action):
<ide><path>research/lstm_object_detection/models/lstm_ssd_interleaved_mobilenet_v2_feature_extractor.py
<ide> def extract_features(self, preprocessed_inputs, state_saver=None,
<ide> 'not equal!')
<ide>
<ide> with slim.arg_scope(self._conv_hyperparams_fn()):
<del> with tf.variable_scope('LSTM', reuse=self._reuse_weights) as lstm_scope:
<add> with tf.variable_scope('LSTM', reuse=self._reuse_weights):
<ide> output_size = (large_base_feature_shape[1], large_base_feature_shape[2])
<ide> lstm_cell, init_state, step = self.create_lstm_cell(
<ide> batch_size, output_size, state_saver, state_name)
<ide> def extract_features(self, preprocessed_inputs, state_saver=None,
<ide> step,
<ide> selection_strategy=self._interleave_method,
<ide> is_training=self._is_training,
<add> is_quantized=self._is_quantized,
<ide> pre_bottleneck=self._pre_bottleneck,
<ide> flatten_state=self._flatten_state,
<del> scope=lstm_scope)
<add> scope=None)
<ide> self._states_out = states_out
<ide>
<ide> batcher_ops = None | 2 |
Java | Java | add test to reproduce spr-15271 | 598d9a4b052922e6dec1a9b28b859fe2066912bb | <ide><path>spring-core/src/test/java/org/springframework/core/annotation/AnnotatedElementUtilsTests.java
<ide> /*
<del> * Copyright 2002-2016 the original author or authors.
<add> * Copyright 2002-2017 the original author or authors.
<ide> *
<ide> * Licensed under the Apache License, Version 2.0 (the "License");
<ide> * you may not use this file except in compliance with the License.
<ide> public void getAllAnnotationAttributesOnClassWithMultipleComposedAnnotations() {
<ide> attributes.get("value"));
<ide> }
<ide>
<add> @Test
<add> @Ignore("To be validated by ")
<add> public void getAllMergedAnnotationsOnClassWithInterface() throws NoSuchMethodException {
<add> Method m = TransactionalServiceImpl.class.getMethod("doIt");
<add> Set<Transactional> allMergedAnnotations =
<add> getAllMergedAnnotations(m, Transactional.class);
<add> assertEquals(1, allMergedAnnotations.size());
<add> }
<add>
<ide> @Test
<ide> public void getMergedAnnotationAttributesOnClassWithLocalAnnotation() {
<ide> Class<?> element = TxConfig.class;
<ide> static class SpringAppConfigClass {
<ide> static class ResourceHolder {
<ide> }
<ide>
<add> interface TransactionalService {
<add>
<add> @Transactional
<add> void doIt();
<add> }
<add>
<add> class TransactionalServiceImpl implements TransactionalService {
<add>
<add> @Override
<add> public void doIt() {
<add> }
<add> }
<add>
<ide> } | 1 |
Javascript | Javascript | log refresh failures | 2f2a0763a27c344914338978b23af2565a807307 | <ide><path>src/git-repository-async.js
<ide> export default class GitRepositoryAsync {
<ide> return Promise.reject(e)
<ide> }
<ide> })
<add> .catch(e => {
<add> console.error('Error refreshing repository status:')
<add> console.error(e)
<add> return Promise.reject(e)
<add> })
<ide> })
<ide> return this._refreshingPromise
<ide> } | 1 |
Java | Java | improve semantics writing currentdata | 01a82b529107f75ded838bebeb0045aac8903be6 | <ide><path>spring-web/src/main/java/org/springframework/http/server/reactive/AbstractListenerWriteFlushProcessor.java
<ide> public <T> void onComplete(AbstractListenerWriteFlushProcessor<T> processor) {
<ide> }
<ide>
<ide> public <T> void writeComplete(AbstractListenerWriteFlushProcessor<T> processor) {
<del> // ignore
<add> throw new IllegalStateException(toString());
<ide> }
<ide>
<ide> public <T> void onFlushPossible(AbstractListenerWriteFlushProcessor<T> processor) {
<ide><path>spring-web/src/main/java/org/springframework/http/server/reactive/AbstractListenerWriteProcessor.java
<ide> private Subscription subscription;
<ide>
<ide> @Nullable
<del> protected volatile T currentData;
<add> private volatile T currentData;
<ide>
<ide> private volatile boolean subscriberCompleted;
<ide>
<ide> protected void dataReceived(T data) {
<ide> this.currentData = data;
<ide> }
<ide>
<del> /**
<del> * Called when the current received data item can be released.
<del> */
<del> protected abstract void releaseData();
<del>
<ide> /**
<ide> * Whether writing is possible.
<ide> */
<ide> protected abstract boolean isWritePossible();
<ide>
<ide> /**
<ide> * Write the given item.
<add> * <p><strong>Note:</strong> Sub-classes are responsible for releasing any
<add> * data buffer associated with the item, once fully written, if pooled
<add> * buffers apply to the underlying container.
<ide> * @param data the item to write
<del> * @return whether the data was fully written ({@code true})
<del> * and new data can be requested, or otherwise ({@code false})
<add> * @return whether the current data item was written and another one
<add> * requested ({@code true}), or or otherwise if more writes are required.
<ide> */
<ide> protected abstract boolean write(T data) throws IOException;
<ide>
<ide> protected void dataReceived(T data) {
<ide> * the next item from the upstream, write Publisher.
<ide> * <p>The default implementation is a no-op.
<ide> */
<del> protected void suspendWriting() {
<add> protected void writingPaused() {
<ide> }
<ide>
<ide> /**
<ide> public <T> void onWritePossible(AbstractListenerWriteProcessor<T> processor) {
<ide> T data = processor.currentData;
<ide> Assert.state(data != null, "No data");
<ide> try {
<del> boolean writeCompleted = processor.write(data);
<del> if (writeCompleted) {
<del> processor.releaseData();
<add> if (processor.write(data)) {
<ide> if (processor.changeState(WRITING, REQUESTED)) {
<add> processor.currentData = null;
<ide> if (processor.subscriberCompleted) {
<ide> processor.changeStateToComplete(REQUESTED);
<ide> }
<ide> else {
<del> processor.suspendWriting();
<add> processor.writingPaused();
<ide> Assert.state(processor.subscription != null, "No subscription");
<ide> processor.subscription.request(1);
<ide> }
<ide><path>spring-web/src/main/java/org/springframework/http/server/reactive/ServletServerHttpResponse.java
<ide> import org.springframework.core.io.buffer.DataBuffer;
<ide> import org.springframework.core.io.buffer.DataBufferFactory;
<ide> import org.springframework.core.io.buffer.DataBufferUtils;
<del>import org.springframework.http.HttpStatus;
<ide> import org.springframework.http.MediaType;
<ide> import org.springframework.http.ResponseCookie;
<ide> import org.springframework.lang.Nullable;
<ide> protected boolean isWritePossible() {
<ide> return ServletServerHttpResponse.this.isWritePossible();
<ide> }
<ide>
<del> @Override
<del> protected void releaseData() {
<del> if (logger.isTraceEnabled()) {
<del> logger.trace("releaseData: " + this.currentData);
<del> }
<del> DataBufferUtils.release(this.currentData);
<del> this.currentData = null;
<del> }
<del>
<ide> @Override
<ide> protected boolean isDataEmpty(DataBuffer dataBuffer) {
<ide> return dataBuffer.readableByteCount() == 0;
<ide> protected boolean write(DataBuffer dataBuffer) throws IOException {
<ide> if (this.logger.isTraceEnabled()) {
<ide> this.logger.trace("written: " + written + " total: " + remaining);
<ide> }
<del> return written == remaining;
<del> }
<del> else {
<del> return false;
<add> if (written == remaining) {
<add> if (logger.isTraceEnabled()) {
<add> logger.trace("releaseData: " + dataBuffer);
<add> }
<add> DataBufferUtils.release(dataBuffer);
<add> return true;
<add> }
<ide> }
<add> return false;
<ide> }
<ide>
<ide> @Override
<ide><path>spring-web/src/main/java/org/springframework/http/server/reactive/UndertowServerHttpResponse.java
<ide> protected boolean write(DataBuffer dataBuffer) throws IOException {
<ide> if (logger.isTraceEnabled()) {
<ide> logger.trace("written: " + written + " total: " + total);
<ide> }
<del> return written == total;
<add> if (written != total) {
<add> return false;
<add> }
<add> if (logger.isTraceEnabled()) {
<add> logger.trace("releaseData: " + dataBuffer);
<add> }
<add> DataBufferUtils.release(dataBuffer);
<add> this.byteBuffer = null;
<add> return true;
<ide> }
<ide>
<ide> private int writeByteBuffer(ByteBuffer byteBuffer) throws IOException {
<ide> protected void dataReceived(DataBuffer dataBuffer) {
<ide> this.byteBuffer = dataBuffer.asByteBuffer();
<ide> }
<ide>
<del> @Override
<del> protected void releaseData() {
<del> if (logger.isTraceEnabled()) {
<del> logger.trace("releaseData: " + this.currentData);
<del> }
<del> DataBufferUtils.release(this.currentData);
<del> this.currentData = null;
<del>
<del> this.byteBuffer = null;
<del> }
<del>
<ide> @Override
<ide> protected boolean isDataEmpty(DataBuffer dataBuffer) {
<ide> return (dataBuffer.readableByteCount() == 0);
<ide> }
<ide>
<ide> @Override
<del> protected void suspendWriting() {
<add> protected void writingPaused() {
<ide> this.channel.suspendWrites();
<ide> }
<ide>
<ide><path>spring-webflux/src/main/java/org/springframework/web/reactive/socket/adapter/AbstractListenerWebSocketSession.java
<ide> public Mono<Void> send(Publisher<WebSocketMessage> messages) {
<ide>
<ide> /**
<ide> * Send the given WebSocket message.
<add> * <p><strong>Note:</strong> Sub-classes are responsible for releasing the
<add> * payload data buffer, once fully written, if pooled buffers apply to the
<add> * underlying container.
<ide> */
<ide> protected abstract boolean sendMessage(WebSocketMessage message) throws IOException;
<ide>
<ide> protected boolean write(WebSocketMessage message) throws IOException {
<ide> return sendMessage(message);
<ide> }
<ide>
<del> @Override
<del> protected void releaseData() {
<del> this.currentData = null;
<del> }
<del>
<ide> @Override
<ide> protected boolean isDataEmpty(WebSocketMessage message) {
<ide> return (message.getPayload().readableByteCount() == 0);
<ide> }
<ide>
<ide> @Override
<ide> protected boolean isWritePossible() {
<del> return (this.isReady && this.currentData != null);
<add> return (this.isReady);
<ide> }
<ide>
<ide> /**
<ide><path>spring-webflux/src/main/java/org/springframework/web/reactive/socket/adapter/UndertowWebSocketSession.java
<ide> import reactor.core.publisher.Mono;
<ide> import reactor.core.publisher.MonoProcessor;
<ide>
<add>import org.springframework.core.io.buffer.DataBuffer;
<ide> import org.springframework.core.io.buffer.DataBufferFactory;
<add>import org.springframework.core.io.buffer.DataBufferUtils;
<ide> import org.springframework.lang.Nullable;
<ide> import org.springframework.util.ObjectUtils;
<ide> import org.springframework.web.reactive.socket.CloseStatus;
<ide> protected boolean sendMessage(WebSocketMessage message) throws IOException {
<ide> if (WebSocketMessage.Type.TEXT.equals(message.getType())) {
<ide> getSendProcessor().setReadyToSend(false);
<ide> String text = new String(buffer.array(), StandardCharsets.UTF_8);
<del> WebSockets.sendText(text, getDelegate(), new SendProcessorCallback());
<add> WebSockets.sendText(text, getDelegate(), new SendProcessorCallback(message.getPayload()));
<ide> }
<ide> else if (WebSocketMessage.Type.BINARY.equals(message.getType())) {
<ide> getSendProcessor().setReadyToSend(false);
<del> WebSockets.sendBinary(buffer, getDelegate(), new SendProcessorCallback());
<add> WebSockets.sendBinary(buffer, getDelegate(), new SendProcessorCallback(message.getPayload()));
<ide> }
<ide> else if (WebSocketMessage.Type.PING.equals(message.getType())) {
<ide> getSendProcessor().setReadyToSend(false);
<del> WebSockets.sendPing(buffer, getDelegate(), new SendProcessorCallback());
<add> WebSockets.sendPing(buffer, getDelegate(), new SendProcessorCallback(message.getPayload()));
<ide> }
<ide> else if (WebSocketMessage.Type.PONG.equals(message.getType())) {
<ide> getSendProcessor().setReadyToSend(false);
<del> WebSockets.sendPong(buffer, getDelegate(), new SendProcessorCallback());
<add> WebSockets.sendPong(buffer, getDelegate(), new SendProcessorCallback(message.getPayload()));
<ide> }
<ide> else {
<ide> throw new IllegalArgumentException("Unexpected message type: " + message.getType());
<ide> public Mono<Void> close(CloseStatus status) {
<ide>
<ide> private final class SendProcessorCallback implements WebSocketCallback<Void> {
<ide>
<add> private final DataBuffer payload;
<add>
<add> SendProcessorCallback(DataBuffer payload) {
<add> this.payload = payload;
<add> }
<add>
<ide> @Override
<ide> public void complete(WebSocketChannel channel, Void context) {
<add> DataBufferUtils.release(this.payload);
<ide> getSendProcessor().setReadyToSend(true);
<ide> getSendProcessor().onWritePossible();
<ide> }
<ide>
<ide> @Override
<ide> public void onError(WebSocketChannel channel, Void context, Throwable throwable) {
<add> DataBufferUtils.release(this.payload);
<ide> getSendProcessor().cancel();
<ide> getSendProcessor().onError(throwable);
<ide> } | 6 |
Mixed | Python | add type hints and tests. | c6dd9753893533934c7804bb714bbce2de8dd1a7 | <ide><path>DIRECTORY.md
<ide> * [Heaps Algorithm](https://github.com/TheAlgorithms/Python/blob/master/divide_and_conquer/heaps_algorithm.py)
<ide> * [Heaps Algorithm Iterative](https://github.com/TheAlgorithms/Python/blob/master/divide_and_conquer/heaps_algorithm_iterative.py)
<ide> * [Inversions](https://github.com/TheAlgorithms/Python/blob/master/divide_and_conquer/inversions.py)
<add> * [Kth Order Statistic](https://github.com/TheAlgorithms/Python/blob/master/divide_and_conquer/kth_order_statistic.py)
<ide> * [Max Subarray Sum](https://github.com/TheAlgorithms/Python/blob/master/divide_and_conquer/max_subarray_sum.py)
<ide> * [Mergesort](https://github.com/TheAlgorithms/Python/blob/master/divide_and_conquer/mergesort.py)
<ide> * [Power](https://github.com/TheAlgorithms/Python/blob/master/divide_and_conquer/power.py)
<ide> * [Chudnovsky Algorithm](https://github.com/TheAlgorithms/Python/blob/master/maths/chudnovsky_algorithm.py)
<ide> * [Collatz Sequence](https://github.com/TheAlgorithms/Python/blob/master/maths/collatz_sequence.py)
<ide> * [Combinations](https://github.com/TheAlgorithms/Python/blob/master/maths/combinations.py)
<add> * [Decimal Isolate](https://github.com/TheAlgorithms/Python/blob/master/maths/decimal_isolate.py)
<ide> * [Entropy](https://github.com/TheAlgorithms/Python/blob/master/maths/entropy.py)
<ide> * [Eulers Totient](https://github.com/TheAlgorithms/Python/blob/master/maths/eulers_totient.py)
<ide> * [Explicit Euler](https://github.com/TheAlgorithms/Python/blob/master/maths/explicit_euler.py)
<ide> * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_062/sol1.py)
<ide> * Problem 063
<ide> * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_063/sol1.py)
<add> * Problem 064
<add> * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_064/sol1.py)
<ide> * Problem 065
<ide> * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_065/sol1.py)
<ide> * Problem 067
<ide> * [Sol2](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_072/sol2.py)
<ide> * Problem 074
<ide> * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_074/sol1.py)
<add> * [Sol2](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_074/sol2.py)
<ide> * Problem 075
<ide> * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_075/sol1.py)
<ide> * Problem 076
<ide> * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_174/sol1.py)
<ide> * Problem 191
<ide> * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_191/sol1.py)
<add> * Problem 203
<add> * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_203/sol1.py)
<ide> * Problem 206
<ide> * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_206/sol1.py)
<ide> * Problem 207
<ide> * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_207/sol1.py)
<ide> * Problem 234
<ide> * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_234/sol1.py)
<add> * Problem 301
<add> * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_301/sol1.py)
<ide> * Problem 551
<ide> * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_551/sol1.py)
<ide>
<ide><path>searches/ternary_search.py
<ide> Time Complexity : O(log3 N)
<ide> Space Complexity : O(1)
<ide> """
<del>import sys
<ide> from typing import List
<ide>
<ide> # This is the precision for this function which can be altered.
<ide>
<ide>
<ide> # This is the linear search that will occur after the search space has become smaller.
<del>def lin_search(left: int, right: int, A: List[int], target: int):
<del> for i in range(left, right + 1):
<del> if A[i] == target:
<del> return i
<ide>
<ide>
<del># This is the iterative method of the ternary search algorithm.
<del>def ite_ternary_search(A: List[int], target: int):
<del> left = 0
<del> right = len(A) - 1
<del> while True:
<del> if left < right:
<add>def lin_search(left: int, right: int, array: List[int], target: int) -> int:
<add> """Perform linear search in list. Returns -1 if element is not found.
<add>
<add> Parameters
<add> ----------
<add> left : int
<add> left index bound.
<add> right : int
<add> right index bound.
<add> array : List[int]
<add> List of elements to be searched on
<add> target : int
<add> Element that is searched
<add>
<add> Returns
<add> -------
<add> int
<add> index of element that is looked for.
<add>
<add> Examples
<add> --------
<add> >>> lin_search(0, 4, [4, 5, 6, 7], 7)
<add> 3
<add> >>> lin_search(0, 3, [4, 5, 6, 7], 7)
<add> -1
<add> >>> lin_search(0, 2, [-18, 2], -18)
<add> 0
<add> >>> lin_search(0, 1, [5], 5)
<add> 0
<add> >>> lin_search(0, 3, ['a', 'c', 'd'], 'c')
<add> 1
<add> >>> lin_search(0, 3, [.1, .4 , -.1], .1)
<add> 0
<add> >>> lin_search(0, 3, [.1, .4 , -.1], -.1)
<add> 2
<add> """
<add> for i in range(left, right):
<add> if array[i] == target:
<add> return i
<add> return -1
<add>
<add>
<add>def ite_ternary_search(array: List[int], target: int) -> int:
<add> """Iterative method of the ternary search algorithm.
<add> >>> test_list = [0, 1, 2, 8, 13, 17, 19, 32, 42]
<add> >>> ite_ternary_search(test_list, 3)
<add> -1
<add> >>> ite_ternary_search(test_list, 13)
<add> 4
<add> >>> ite_ternary_search([4, 5, 6, 7], 4)
<add> 0
<add> >>> ite_ternary_search([4, 5, 6, 7], -10)
<add> -1
<add> >>> ite_ternary_search([-18, 2], -18)
<add> 0
<add> >>> ite_ternary_search([5], 5)
<add> 0
<add> >>> ite_ternary_search(['a', 'c', 'd'], 'c')
<add> 1
<add> >>> ite_ternary_search(['a', 'c', 'd'], 'f')
<add> -1
<add> >>> ite_ternary_search([], 1)
<add> -1
<add> >>> ite_ternary_search([.1, .4 , -.1], .1)
<add> 0
<add> """
<ide>
<del> if right - left < precision:
<del> return lin_search(left, right, A, target)
<add> left = 0
<add> right = len(array)
<add> while left <= right:
<add> if right - left < precision:
<add> return lin_search(left, right, array, target)
<ide>
<del> oneThird = (left + right) / 3 + 1
<del> twoThird = 2 * (left + right) / 3 + 1
<add> one_third = (left + right) / 3 + 1
<add> two_third = 2 * (left + right) / 3 + 1
<ide>
<del> if A[oneThird] == target:
<del> return oneThird
<del> elif A[twoThird] == target:
<del> return twoThird
<add> if array[one_third] == target:
<add> return one_third
<add> elif array[two_third] == target:
<add> return two_third
<ide>
<del> elif target < A[oneThird]:
<del> right = oneThird - 1
<del> elif A[twoThird] < target:
<del> left = twoThird + 1
<add> elif target < array[one_third]:
<add> right = one_third - 1
<add> elif array[two_third] < target:
<add> left = two_third + 1
<ide>
<del> else:
<del> left = oneThird + 1
<del> right = twoThird - 1
<ide> else:
<del> return None
<ide>
<del>
<del># This is the recursive method of the ternary search algorithm.
<del>def rec_ternary_search(left: int, right: int, A: List[int], target: int):
<add> left = one_third + 1
<add> right = two_third - 1
<add> else:
<add> return -1
<add>
<add>
<add>def rec_ternary_search(left: int, right: int, array: List[int], target: int) -> int:
<add> """Recursive method of the ternary search algorithm.
<add>
<add> >>> test_list = [0, 1, 2, 8, 13, 17, 19, 32, 42]
<add> >>> rec_ternary_search(0, len(test_list), test_list, 3)
<add> -1
<add> >>> rec_ternary_search(4, len(test_list), test_list, 42)
<add> 8
<add> >>> rec_ternary_search(0, 2, [4, 5, 6, 7], 4)
<add> 0
<add> >>> rec_ternary_search(0, 3, [4, 5, 6, 7], -10)
<add> -1
<add> >>> rec_ternary_search(0, 1, [-18, 2], -18)
<add> 0
<add> >>> rec_ternary_search(0, 1, [5], 5)
<add> 0
<add> >>> rec_ternary_search(0, 2, ['a', 'c', 'd'], 'c')
<add> 1
<add> >>> rec_ternary_search(0, 2, ['a', 'c', 'd'], 'f')
<add> -1
<add> >>> rec_ternary_search(0, 0, [], 1)
<add> -1
<add> >>> rec_ternary_search(0, 3, [.1, .4 , -.1], .1)
<add> 0
<add> """
<ide> if left < right:
<del>
<ide> if right - left < precision:
<del> return lin_search(left, right, A, target)
<del>
<del> oneThird = (left + right) / 3 + 1
<del> twoThird = 2 * (left + right) / 3 + 1
<del>
<del> if A[oneThird] == target:
<del> return oneThird
<del> elif A[twoThird] == target:
<del> return twoThird
<del>
<del> elif target < A[oneThird]:
<del> return rec_ternary_search(left, oneThird - 1, A, target)
<del> elif A[twoThird] < target:
<del> return rec_ternary_search(twoThird + 1, right, A, target)
<del>
<add> return lin_search(left, right, array, target)
<add> one_third = (left + right) / 3 + 1
<add> two_third = 2 * (left + right) / 3 + 1
<add>
<add> if array[one_third] == target:
<add> return one_third
<add> elif array[two_third] == target:
<add> return two_third
<add>
<add> elif target < array[one_third]:
<add> return rec_ternary_search(left, one_third - 1, array, target)
<add> elif array[two_third] < target:
<add> return rec_ternary_search(two_third + 1, right, array, target)
<ide> else:
<del> return rec_ternary_search(oneThird + 1, twoThird - 1, A, target)
<add> return rec_ternary_search(one_third + 1, two_third - 1, array, target)
<ide> else:
<del> return None
<del>
<del>
<del># This function is to check if the array is sorted.
<del>def __assert_sorted(collection: List[int]) -> bool:
<del> if collection != sorted(collection):
<del> raise ValueError("Collection must be sorted")
<del> return True
<add> return -1
<ide>
<ide>
<ide> if __name__ == "__main__":
<del> user_input = input("Enter numbers separated by coma:\n").strip()
<del> collection = [int(item) for item in user_input.split(",")]
<del>
<del> try:
<del> __assert_sorted(collection)
<del> except ValueError:
<del> sys.exit("Sequence must be sorted to apply the ternary search")
<del>
<del> target_input = input("Enter a single number to be found in the list:\n")
<del> target = int(target_input)
<add> user_input = input("Enter numbers separated by comma:\n").strip()
<add> collection = [int(item.strip()) for item in user_input.split(",")]
<add> assert collection == sorted(collection), f"List must be ordered.\n{collection}."
<add> target = int(input("Enter the number to be found in the list:\n").strip())
<ide> result1 = ite_ternary_search(collection, target)
<ide> result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
<del>
<del> if result2 is not None:
<add> if result2 != -1:
<ide> print(f"Iterative search: {target} found at positions: {result1}")
<ide> print(f"Recursive search: {target} found at positions: {result2}")
<ide> else: | 2 |
Ruby | Ruby | fix `unknown migration version "6.0"` | 2ea5b8f0578b2ec35881c8b01e6e18aed7e38edc | <ide><path>activerecord/lib/active_record/migration/compatibility.rb
<ide> def self.find(version)
<ide> const_get(name)
<ide> end
<ide>
<del> V5_2 = Current
<add> V6_0 = Current
<add>
<add> class V5_2 < V6_0
<add> end
<ide>
<ide> class V5_1 < V5_2
<ide> def change_column(table_name, column_name, type, options = {}) | 1 |
Python | Python | remove unneeded assignment | 857226896e00f9335a2ab7b46b20785e1523d248 | <ide><path>swivel/prep.py
<ide> def create_vocabulary(lines):
<ide> vocab = [(tok, n) for tok, n in vocab.iteritems() if n >= FLAGS.min_count]
<ide> vocab.sort(key=lambda kv: (-kv[1], kv[0]))
<ide>
<del> num_words = max(len(vocab), FLAGS.shard_size)
<ide> num_words = min(len(vocab), FLAGS.max_vocab)
<ide> if num_words % FLAGS.shard_size != 0:
<ide> num_words -= num_words % FLAGS.shard_size | 1 |
PHP | PHP | use the defined route name in the routes shell | 1b5d1fe39d0b60463348c05e32915cc1e1cdcc7f | <ide><path>src/Shell/RoutesShell.php
<ide> public function main()
<ide> ['Route name', 'URI template', 'Defaults']
<ide> ];
<ide> foreach (Router::routes() as $route) {
<del> $output[] = [$route->getName(), $route->template, json_encode($route->defaults)];
<add> $name = isset($route->options['_name']) ? $route->options['_name'] : $route->getName();
<add> $output[] = [$name, $route->template, json_encode($route->defaults)];
<ide> }
<ide> $this->helper('table')->output($output);
<ide> }
<ide><path>tests/TestCase/Shell/RoutesShellTest.php
<ide> public function setUp()
<ide> $this->shell = new RoutesShell($this->io);
<ide> Router::connect('/articles/:action/*', ['controller' => 'Articles']);
<ide> Router::connect('/bake/:controller/:action', ['plugin' => 'Bake']);
<add> Router::connect('/tests/:action/*', ['controller' => 'Tests'], ['_name' => 'testName']);
<ide> }
<ide>
<ide> /**
<ide> public function testMain()
<ide> 'articles:_action',
<ide> '/articles/:action/*',
<ide> '{"controller":"Articles","action":"index","plugin":null}'
<add> ]),
<add> $this->contains([
<add> 'bake._controller:_action',
<add> '/bake/:controller/:action',
<add> '{"plugin":"Bake","action":"index"}',
<add> ]),
<add> $this->contains([
<add> 'testName',
<add> '/tests/:action/*',
<add> '{"controller":"Tests","action":"index","plugin":null}'
<ide> ])
<ide> )
<ide> ); | 2 |
Javascript | Javascript | simplify error handling | 3e58696c07161ef84c6b12aeb7e03d271563dcb9 | <ide><path>lib/tls.js
<ide> CryptoStream.prototype._push = function() {
<ide> chunkBytes = this._pusher(pool, bytesRead, pool.length - bytesRead);
<ide>
<ide> if (this.pair._ssl && this.pair._ssl.error) {
<del> if (this.pair._secureEstablished) {
<del> this.pair._error();
<del> } else {
<del> this.pair._destroy();
<del> }
<add> this.pair._error();
<ide> return;
<ide> }
<ide>
<ide> CryptoStream.prototype._pull = function() {
<ide> var rv = this._puller(tmp);
<ide>
<ide> if (this.pair._ssl && this.pair._ssl.error) {
<del> if (this.pair._secureEstablished) {
<del> this.pair._error();
<del> } else {
<del> this.pair._destroy();
<del> }
<add> this.pair._error();
<ide> return;
<ide> }
<ide>
<ide> SecurePair.prototype._destroy = function() {
<ide>
<ide>
<ide> SecurePair.prototype._error = function() {
<del> var err = this._ssl.error;
<del> this._ssl.error = null;
<del>
<del> if (this._isServer &&
<del> this._rejectUnauthorized &&
<del> /peer did not return a certificate/.test(err.message)) {
<del> // Not really an error.
<add> if (!this._secureEstablished) {
<ide> this._destroy();
<ide> } else {
<del> this.cleartext.emit('error', err);
<add> var err = this._ssl.error;
<add> this._ssl.error = null;
<add>
<add> if (this._isServer &&
<add> this._rejectUnauthorized &&
<add> /peer did not return a certificate/.test(err.message)) {
<add> // Not really an error.
<add> this._destroy();
<add> } else {
<add> this.cleartext.emit('error', err);
<add> }
<ide> }
<ide> };
<ide> | 1 |
PHP | PHP | sortreplacements | 2f2da143b27e5d2522c76e8e2c0bf0c9e9f79f02 | <ide><path>src/Illuminate/Translation/Translator.php
<ide> protected function sortReplacements(array $replace)
<ide> {
<ide> return (new Collection($replace))->sortBy(function ($value, $key) {
<ide> return mb_strlen($key) * -1;
<del> });
<add> })->all();
<ide> }
<ide>
<ide> /** | 1 |
Javascript | Javascript | allow empty subject even with altnames defined | ff48009fefcecedfee2c6ff1719e5be3f6969049 | <ide><path>lib/tls.js
<ide> exports.checkServerIdentity = function checkServerIdentity(hostname, cert) {
<ide> let valid = false;
<ide> let reason = 'Unknown reason';
<ide>
<add> const hasAltNames =
<add> dnsNames.length > 0 || ips.length > 0 || uriNames.length > 0;
<add>
<add> hostname = unfqdn(hostname); // Remove trailing dot for error messages.
<add>
<ide> if (net.isIP(hostname)) {
<ide> valid = ips.includes(canonicalizeIP(hostname));
<ide> if (!valid)
<ide> reason = `IP: ${hostname} is not in the cert's list: ${ips.join(', ')}`;
<ide> // TODO(bnoordhuis) Also check URI SANs that are IP addresses.
<del> } else if (subject) {
<del> hostname = unfqdn(hostname); // Remove trailing dot for error messages.
<add> } else if (hasAltNames || subject) {
<ide> const hostParts = splitHost(hostname);
<ide> const wildcard = (pattern) => check(hostParts, pattern, true);
<del> const noWildcard = (pattern) => check(hostParts, pattern, false);
<ide>
<del> // Match against Common Name only if no supported identifiers are present.
<del> if (dnsNames.length === 0 && ips.length === 0 && uriNames.length === 0) {
<add> if (hasAltNames) {
<add> const noWildcard = (pattern) => check(hostParts, pattern, false);
<add> valid = dnsNames.some(wildcard) || uriNames.some(noWildcard);
<add> if (!valid)
<add> reason =
<add> `Host: ${hostname}. is not in the cert's altnames: ${altNames}`;
<add> } else {
<add> // Match against Common Name only if no supported identifiers exist.
<ide> const cn = subject.CN;
<ide>
<ide> if (ArrayIsArray(cn))
<ide> exports.checkServerIdentity = function checkServerIdentity(hostname, cert) {
<ide>
<ide> if (!valid)
<ide> reason = `Host: ${hostname}. is not cert's CN: ${cn}`;
<del> } else {
<del> valid = dnsNames.some(wildcard) || uriNames.some(noWildcard);
<del> if (!valid)
<del> reason =
<del> `Host: ${hostname}. is not in the cert's altnames: ${altNames}`;
<ide> }
<ide> } else {
<ide> reason = 'Cert is empty';
<ide><path>test/parallel/test-tls-check-server-identity.js
<ide> const tests = [
<ide> error: 'Cert is empty'
<ide> },
<ide>
<add> // Empty Subject w/DNS name
<add> {
<add> host: 'a.com', cert: {
<add> subjectaltname: 'DNS:a.com',
<add> }
<add> },
<add>
<add> // Empty Subject w/URI name
<add> {
<add> host: 'a.b.a.com', cert: {
<add> subjectaltname: 'URI:http://a.b.a.com/',
<add> }
<add> },
<add>
<ide> // Multiple CN fields
<ide> {
<ide> host: 'foo.com', cert: { | 2 |
Javascript | Javascript | remove unneeded cb check from settimeout() | 8625a3815e95985d7710df3f53ffff4387abd2d9 | <ide><path>lib/_http_outgoing.js
<ide> exports.OutgoingMessage = OutgoingMessage;
<ide> OutgoingMessage.prototype.setTimeout = function(msecs, callback) {
<ide>
<ide> if (callback) {
<del> if (typeof callback !== 'function')
<del> throw new TypeError('callback must be a function');
<ide> this.on('timeout', callback);
<ide> }
<ide> | 1 |
Javascript | Javascript | remove ownerdocument check in offset getter | 6176567361e9d05d4f5eb7e98735a678f2cd7ea9 | <ide><path>src/offset.js
<ide> jQuery.offset = {
<ide>
<ide> jQuery.fn.extend({
<ide> offset: function( options ) {
<add> // Preserve chaining for setter
<ide> if ( arguments.length ) {
<ide> return options === undefined ?
<ide> this :
<ide> jQuery.fn.extend({
<ide> });
<ide> }
<ide>
<del> var docElem, win, rect,
<del> elem = this[ 0 ],
<del> doc = elem && elem.ownerDocument;
<add> var docElem, win, rect, doc,
<add> elem = this[ 0 ];
<ide>
<del> if ( !doc ) {
<add> if ( !elem ) {
<ide> return;
<ide> }
<ide>
<ide> rect = elem.getBoundingClientRect();
<ide>
<ide> // Make sure element is not hidden (display: none) or disconnected
<ide> if ( rect.width || rect.height || elem.getClientRects().length ) {
<add> doc = elem.ownerDocument;
<ide> win = getWindow( doc );
<ide> docElem = doc.documentElement;
<ide>
<ide><path>test/unit/offset.js
<ide> module("offset", { setup: function(){
<ide> */
<ide>
<ide> test("empty set", function() {
<del> expect(2);
<add> expect( 2 );
<ide> strictEqual( jQuery().offset(), undefined, "offset() returns undefined for empty set (#11962)" );
<ide> strictEqual( jQuery().position(), undefined, "position() returns undefined for empty set (#11962)" );
<ide> });
<ide>
<del>test("object without getBoundingClientRect", function() {
<del> expect(2);
<del>
<del> // Simulates a browser without gBCR on elements, we just want to return 0,0
<del> var result = jQuery({ ownerDocument: document }).offset();
<del> equal( result.top, 0, "Check top" );
<del> equal( result.left, 0, "Check left" );
<del>});
<del>
<ide> test("disconnected element", function() {
<ide> expect(1);
<ide> | 2 |
Ruby | Ruby | fix disallowed deprecations with default warning | 569232b5294b11612823b70c7922e5b7ed95b988 | <ide><path>activesupport/lib/active_support/deprecation/disallowed.rb
<ide> def deprecation_disallowed?(message)
<ide> disallowed = ActiveSupport::Deprecation.disallowed_warnings
<ide> return false if explicitly_allowed?(message)
<ide> return true if disallowed == :all
<del> disallowed.any? do |rule|
<add> message && disallowed.any? do |rule|
<ide> case rule
<ide> when String, Symbol
<ide> message.include?(rule.to_s)
<ide> def explicitly_allowed?(message)
<ide> allowances = @explicitly_allowed_warnings.value
<ide> return false unless allowances
<ide> return true if allowances == :all
<del> allowances = [allowances] unless allowances.kind_of?(Array)
<del> allowances.any? do |rule|
<add> message && Array(allowances).any? do |rule|
<ide> case rule
<ide> when String, Symbol
<ide> message.include?(rule.to_s)
<ide><path>activesupport/lib/active_support/deprecation/reporting.rb
<ide> def warn(message = nil, callstack = nil)
<ide> return if silenced
<ide>
<ide> callstack ||= caller_locations(2)
<del> deprecation_message(callstack, message).tap do |m|
<add> deprecation_message(callstack, message).tap do |full_message|
<ide> if deprecation_disallowed?(message)
<del> disallowed_behavior.each { |b| b.call(m, callstack, deprecation_horizon, gem_name) }
<add> disallowed_behavior.each { |b| b.call(full_message, callstack, deprecation_horizon, gem_name) }
<ide> else
<del> behavior.each { |b| b.call(m, callstack, deprecation_horizon, gem_name) }
<add> behavior.each { |b| b.call(full_message, callstack, deprecation_horizon, gem_name) }
<ide> end
<ide> end
<ide> end
<ide><path>activesupport/test/deprecation_test.rb
<ide> def test_different_behaviors_for_allowed_and_disallowed_messages
<ide> assert_match(/fubar/, @c)
<ide> end
<ide>
<add> test "disallowed_warnings with the default warning message" do
<add> @deprecator.disallowed_warnings = :all
<add> assert_disallowed(/./, @deprecator) { @deprecator.warn }
<add>
<add> @deprecator.disallowed_warnings = ["fubar"]
<add> assert_deprecated(/./, @deprecator) { @deprecator.warn }
<add> end
<add>
<ide> def test_allow
<ide> @deprecator.disallowed_warnings = :all
<ide>
<ide> def test_is_a_noop_based_on_if_kwarg_using_proc
<ide> end
<ide> end
<ide>
<add> test "allow with the default warning message" do
<add> @deprecator.disallowed_warnings = :all
<add>
<add> @deprecator.allow(:all) do
<add> assert_deprecated(/./, @deprecator) { @deprecator.warn }
<add> end
<add>
<add> @deprecator.allow(["fubar"]) do
<add> assert_disallowed(/./, @deprecator) { @deprecator.warn }
<add> end
<add> end
<add>
<ide> private
<ide> def deprecator_with_messages
<ide> klass = Class.new(ActiveSupport::Deprecation) | 3 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.